system stringclasses 7 values | input stringlengths 10 94.5k | output stringlengths 14 55.5k |
|---|---|---|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include<hip/hip_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#include<iostream>
#define CHECK 0
const unsigned int SINGLE_PRECISION = 1;
const unsigned int DOUBLE_PRECISION = 0;
float *SMd, *SNd, *SPd;
double *DMd, *DNd, *DPd;
const unsigned int WIDTH = 1024;
//generate matrix
template<typename T>
T *GenMatrix(const unsigned int width, const unsigned int height)
{
T *matrix;
const unsigned int M_SIZE = width*height;
unsigned int i = 0, j = 0;
matrix = (T*) malloc(M_SIZE * sizeof(double));
for(i = 0 ;i < height; i++){
for(j = 0 ;j < width; j++){
matrix[i * width + j] = (rand()*1.0)/ RAND_MAX;
}
}
return matrix;
}
//display matrix
template<typename T>
int PrintMatrix(T *P, const unsigned int width, const unsigned int height)
{
unsigned int i = 0, j = 0;
printf("\n");
for(i = 0 ;i < height; i++){
for(j = 0 ;j < width; j++){
printf("%.3f\t", P[i * width + j]);
}
printf("\n");
}
return 1;
}
//Init data
template<typename T>
void Init_Cuda(T *M, T *N, const unsigned int width, const unsigned int height, bool sp)
{
const unsigned int size = width*height*sizeof(T);
//allocate matrix
if(sp==SINGLE_PRECISION){
hipMalloc((void**)&SMd, size);
hipMemcpy(SMd, M, size, hipMemcpyHostToDevice);
hipMalloc((void**)&SNd, size);
hipMemcpy(SNd, N, size,hipMemcpyHostToDevice);
hipMalloc((void**)&SPd, size);
hipMemset(SPd, 0, size);
}
else
{
hipMalloc((void**)&DMd, size);
hipMemcpy(DMd, M, size, hipMemcpyHostToDevice);
hipMalloc((void**)&DNd, size);
hipMemcpy(DNd, N, size,hipMemcpyHostToDevice);
hipMalloc((void**)&DPd, size);
hipMemset(DPd, 0, size);
}
}
//Free memory
void Free_Cuda(bool sp)
{
if(sp==SINGLE_PRECISION){
hipFree(SMd);
hipFree(SNd);
hipFree(SPd);
}
else
{
hipFree(DMd);
hipFree(DNd);
hipFree(DPd);
}
}
//kernel function
template<typename T>
__global__ void MatrixAddKernel(T *P, const T *M, const T *N, const unsigned int width)
{
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int length = width * width;
while (i < length) {
P[i] = M[i] + N[i];
i += gridDim.x * blockDim.x;
}
}
template<typename T>
int MatrixAdd(T *P, const T *M, const T *N, const unsigned int n)
{
int i, j ;
for(i = 0 ;i < n; i++)
for(j = 0 ;j < n; j++)
{ P [ i* n + j] = M[ i* n + j] + N[ i* n + j];
}
return 0;
}
template<typename T>
int Check(const T *KP, const T *CP, const unsigned int n)
{
int i, j;
T e = 0.001;
int correct = 1;
for(i = 0; i < n ; i++)
for(j = 0; j < n; j++)
{ if(abs(KP[i * n + j] - CP[i * n + j]) > e)
{ printf("%.10f %.10f\n", KP[i * n + j], CP[i * n + j]);
return 0;
}
}
return correct;
}
int main(int argc, char * argv[])
{
bool sp = 1;
float *SM, *SN, *SKP, *SCP;
double *DM, *DN, *DKP, *DCP;
hipEvent_t start, stop;
float elapsedTime;
unsigned int width;
width = WIDTH;
//create number of blocks and number of threads
int Thr = 128;
dim3 block(Thr, 1, 1);
dim3 grid(((width*width)+ Thr - 1) / Thr, 1, 1);
if (argc != 5)
{
/* We print argv[0] assuming it is the program name */
printf("Wrong parameters. Please use the following format for running.\n");
printf(" Usage: %s %s %s %s %s", argv[0], "[matrix_size]", "[single|double]", "[divide_val]", "[num_threads]\n");
exit(EXIT_FAILURE);
} else {
width = atoi(argv[1]);
sp = atoi(argv[2]);
block.x = atoi(argv[4]);
grid.x = ((width*width)/atoi(argv[3]) + block.x - 1) / block.x;
if(atoi(argv[2])!=0)
sp = SINGLE_PRECISION;
else
sp = DOUBLE_PRECISION;
}
//for using MatrixMul_Kernel_Tiled_SM kernel
//block.x = TILE_WIDTH; block.y=TILE_WIDTH;
//grid.x = WIDTH/TILE_WIDTH; grid.y = WIDTH/TILE_WIDTH;
//initialize host memory
if(sp==SINGLE_PRECISION)
{
SM = GenMatrix<float>(width, width);
//PrintMatrix(M, width, width);
SN = GenMatrix<float>(width, width);
//PrintMatrix(N, width, width);
SKP = GenMatrix<float>(width, width);
SCP = GenMatrix<float>(width, width);
//initialize device memory
Init_Cuda<float>(SM, SN, width, width, SINGLE_PRECISION);
}
else
{
DM = GenMatrix<double>(width, width);
//PrintMatrix(M, width, width);
DN = GenMatrix<double>(width, width);
//PrintMatrix(N, width, width);
DKP = GenMatrix<double>(width, width);
DCP = GenMatrix<double>(width, width);
//initialize device memory
Init_Cuda<double>(DM, DN, width, width, DOUBLE_PRECISION);
}
//create cudaEvent start and stop to record elapsed time
hipEventCreate(&start);
hipEventCreate(&stop);
//record start time to start event
hipEventRecord(start, 0);
//launch kernel
if(sp==SINGLE_PRECISION)
{
MatrixAddKernel<float><<<grid, block>>>(SPd, SMd, SNd, width);
}
else
{
MatrixAddKernel<double><<<grid, block>>>(DPd, DMd, DNd, width);
}
//record start time to stop event
hipEventRecord(stop, 0);
//synchronize the stop event
hipEventSynchronize(stop);
//calculate the elapsed time
hipEventElapsedTime(&elapsedTime, start, stop);
//destroy the start and stop event
hipEventDestroy(start);
hipEventDestroy(stop);
//copy data from device memory to host memory
if(sp==SINGLE_PRECISION)
hipMemcpy(SKP, SPd, width*width*sizeof(float), hipMemcpyDeviceToHost);
else
hipMemcpy(DKP, DPd, width*width*sizeof(double), hipMemcpyDeviceToHost);
//PrintMatrix(P, width, width);
//print runtime
printf("[ %s ][ %4dx%4d ][ %10d blocks ][ %5d threads ]\t>\t[ %7.3f (ms) ]\n", ((sp==SINGLE_PRECISION)?"Single Precision":"Double Precision"), width, width, grid.x, block.x, elapsedTime);
#if (CHECK==1)
if(sp==SINGLE_PRECISION)
{
/*printf("M >>>>>>>>>>>>>>>>>>>>>>>>>>\n");
PrintMatrix<float>(SM, width, width);
printf("N >>>>>>>>>>>>>>>>>>>>>>>>>>\n");
PrintMatrix<float>(SN, width, width);
printf("KP >>>>>>>>>>>>>>>>>>>>>>>>>>\n");
PrintMatrix<float>(SKP, width, width);
*/
MatrixAdd<float>(SCP, SM, SN, width);
//printf("CP >>>>>>>>>>>>>>>>>>>>>>>>>>\n");
//PrintMatrix<float>(CP, width, width);
if(Check<float>(SKP, SCP, width))
printf("We do it.\n");
else
printf("Something is wrong.\n");
}
else
{
/*printf("M >>>>>>>>>>>>>>>>>>>>>>>>>>\n");
PrintMatrix<double>(DM, width, width);
printf("N >>>>>>>>>>>>>>>>>>>>>>>>>>\n");
PrintMatrix<double>(DN, width, width);
printf("KP >>>>>>>>>>>>>>>>>>>>>>>>>>\n");
PrintMatrix<double>(DKP, width, width);
*/
MatrixAdd<double>(DCP, DM, DN, width);
//printf("CP >>>>>>>>>>>>>>>>>>>>>>>>>>\n");
//PrintMatrix<double>(DCP, width, width);
if(Check<double>(DKP, DCP, width))
printf("We do it.\n");
else
printf("Something is wrong.\n");
}
#endif
//free host memory
if(sp==SINGLE_PRECISION)
{
free(SM);
free(SN);
free(SKP);
free(SCP);
//free device memory
Free_Cuda(sp);
}
else
{
free(DM);
free(DN);
free(DKP);
free(DCP);
//free device memory
Free_Cuda(sp);
}
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.section .text._Z15MatrixAddKernelIfEvPT_PKS0_S3_j,"axG",@progbits,_Z15MatrixAddKernelIfEvPT_PKS0_S3_j,comdat
.protected _Z15MatrixAddKernelIfEvPT_PKS0_S3_j
.globl _Z15MatrixAddKernelIfEvPT_PKS0_S3_j
.p2align 8
.type _Z15MatrixAddKernelIfEvPT_PKS0_S3_j,@function
_Z15MatrixAddKernelIfEvPT_PKS0_S3_j:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x2c
s_load_b32 s8, s[0:1], 0x18
s_add_u32 s2, s0, 32
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s9, s4, 0xffff
s_mul_i32 s8, s8, s8
v_mad_u64_u32 v[1:2], null, s15, s9, v[0:1]
s_mov_b32 s4, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_u32_e64 s8, v1
s_cbranch_execz .LBB0_3
s_load_b32 s10, s[2:3], 0x0
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x10
v_mov_b32_e32 v2, 0
s_waitcnt lgkmcnt(0)
s_mul_i32 s1, s10, s9
s_mov_b32 s9, 0
.p2align 6
.LBB0_2:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[3:4], 2, v[1:2]
v_add_nc_u32_e32 v1, s1, v1
v_add_co_u32 v5, vcc_lo, s6, v3
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v6, vcc_lo, s7, v4, vcc_lo
v_add_co_u32 v7, vcc_lo, s2, v3
v_add_co_ci_u32_e32 v8, vcc_lo, s3, v4, vcc_lo
v_cmp_le_u32_e32 vcc_lo, s8, v1
global_load_b32 v0, v[5:6], off
global_load_b32 v5, v[7:8], off
v_add_co_u32 v3, s0, s4, v3
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v4, s0, s5, v4, s0
s_or_b32 s9, vcc_lo, s9
s_waitcnt vmcnt(0)
v_add_f32_e32 v0, v0, v5
global_store_b32 v[3:4], v0, off
s_and_not1_b32 exec_lo, exec_lo, s9
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z15MatrixAddKernelIfEvPT_PKS0_S3_j
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.section .text._Z15MatrixAddKernelIfEvPT_PKS0_S3_j,"axG",@progbits,_Z15MatrixAddKernelIfEvPT_PKS0_S3_j,comdat
.Lfunc_end0:
.size _Z15MatrixAddKernelIfEvPT_PKS0_S3_j, .Lfunc_end0-_Z15MatrixAddKernelIfEvPT_PKS0_S3_j
.section .AMDGPU.csdata,"",@progbits
.section .text._Z15MatrixAddKernelIdEvPT_PKS0_S3_j,"axG",@progbits,_Z15MatrixAddKernelIdEvPT_PKS0_S3_j,comdat
.protected _Z15MatrixAddKernelIdEvPT_PKS0_S3_j
.globl _Z15MatrixAddKernelIdEvPT_PKS0_S3_j
.p2align 8
.type _Z15MatrixAddKernelIdEvPT_PKS0_S3_j,@function
_Z15MatrixAddKernelIdEvPT_PKS0_S3_j:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x2c
s_load_b32 s8, s[0:1], 0x18
s_add_u32 s2, s0, 32
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s9, s4, 0xffff
s_mul_i32 s8, s8, s8
v_mad_u64_u32 v[1:2], null, s15, s9, v[0:1]
s_mov_b32 s4, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_u32_e64 s8, v1
s_cbranch_execz .LBB1_3
s_load_b32 s10, s[2:3], 0x0
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x10
v_mov_b32_e32 v2, 0
s_waitcnt lgkmcnt(0)
s_mul_i32 s1, s10, s9
s_mov_b32 s9, 0
.p2align 6
.LBB1_2:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[3:4], 3, v[1:2]
v_add_nc_u32_e32 v1, s1, v1
v_add_co_u32 v5, vcc_lo, s6, v3
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v6, vcc_lo, s7, v4, vcc_lo
v_add_co_u32 v7, vcc_lo, s2, v3
v_add_co_ci_u32_e32 v8, vcc_lo, s3, v4, vcc_lo
v_cmp_le_u32_e32 vcc_lo, s8, v1
global_load_b64 v[5:6], v[5:6], off
global_load_b64 v[7:8], v[7:8], off
v_add_co_u32 v3, s0, s4, v3
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v4, s0, s5, v4, s0
s_or_b32 s9, vcc_lo, s9
s_waitcnt vmcnt(0)
v_add_f64 v[5:6], v[5:6], v[7:8]
global_store_b64 v[3:4], v[5:6], off
s_and_not1_b32 exec_lo, exec_lo, s9
s_cbranch_execnz .LBB1_2
.LBB1_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z15MatrixAddKernelIdEvPT_PKS0_S3_j
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.section .text._Z15MatrixAddKernelIdEvPT_PKS0_S3_j,"axG",@progbits,_Z15MatrixAddKernelIdEvPT_PKS0_S3_j,comdat
.Lfunc_end1:
.size _Z15MatrixAddKernelIdEvPT_PKS0_S3_j, .Lfunc_end1-_Z15MatrixAddKernelIdEvPT_PKS0_S3_j
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z15MatrixAddKernelIfEvPT_PKS0_S3_j
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z15MatrixAddKernelIfEvPT_PKS0_S3_j.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z15MatrixAddKernelIdEvPT_PKS0_S3_j
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z15MatrixAddKernelIdEvPT_PKS0_S3_j.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include<hip/hip_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#include<iostream>
#define CHECK 0
const unsigned int SINGLE_PRECISION = 1;
const unsigned int DOUBLE_PRECISION = 0;
float *SMd, *SNd, *SPd;
double *DMd, *DNd, *DPd;
const unsigned int WIDTH = 1024;
//generate matrix
template<typename T>
T *GenMatrix(const unsigned int width, const unsigned int height)
{
T *matrix;
const unsigned int M_SIZE = width*height;
unsigned int i = 0, j = 0;
matrix = (T*) malloc(M_SIZE * sizeof(double));
for(i = 0 ;i < height; i++){
for(j = 0 ;j < width; j++){
matrix[i * width + j] = (rand()*1.0)/ RAND_MAX;
}
}
return matrix;
}
//display matrix
template<typename T>
int PrintMatrix(T *P, const unsigned int width, const unsigned int height)
{
unsigned int i = 0, j = 0;
printf("\n");
for(i = 0 ;i < height; i++){
for(j = 0 ;j < width; j++){
printf("%.3f\t", P[i * width + j]);
}
printf("\n");
}
return 1;
}
//Init data
template<typename T>
void Init_Cuda(T *M, T *N, const unsigned int width, const unsigned int height, bool sp)
{
const unsigned int size = width*height*sizeof(T);
//allocate matrix
if(sp==SINGLE_PRECISION){
hipMalloc((void**)&SMd, size);
hipMemcpy(SMd, M, size, hipMemcpyHostToDevice);
hipMalloc((void**)&SNd, size);
hipMemcpy(SNd, N, size,hipMemcpyHostToDevice);
hipMalloc((void**)&SPd, size);
hipMemset(SPd, 0, size);
}
else
{
hipMalloc((void**)&DMd, size);
hipMemcpy(DMd, M, size, hipMemcpyHostToDevice);
hipMalloc((void**)&DNd, size);
hipMemcpy(DNd, N, size,hipMemcpyHostToDevice);
hipMalloc((void**)&DPd, size);
hipMemset(DPd, 0, size);
}
}
//Free memory
void Free_Cuda(bool sp)
{
if(sp==SINGLE_PRECISION){
hipFree(SMd);
hipFree(SNd);
hipFree(SPd);
}
else
{
hipFree(DMd);
hipFree(DNd);
hipFree(DPd);
}
}
//kernel function
template<typename T>
__global__ void MatrixAddKernel(T *P, const T *M, const T *N, const unsigned int width)
{
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int length = width * width;
while (i < length) {
P[i] = M[i] + N[i];
i += gridDim.x * blockDim.x;
}
}
template<typename T>
int MatrixAdd(T *P, const T *M, const T *N, const unsigned int n)
{
int i, j ;
for(i = 0 ;i < n; i++)
for(j = 0 ;j < n; j++)
{ P [ i* n + j] = M[ i* n + j] + N[ i* n + j];
}
return 0;
}
template<typename T>
int Check(const T *KP, const T *CP, const unsigned int n)
{
int i, j;
T e = 0.001;
int correct = 1;
for(i = 0; i < n ; i++)
for(j = 0; j < n; j++)
{ if(abs(KP[i * n + j] - CP[i * n + j]) > e)
{ printf("%.10f %.10f\n", KP[i * n + j], CP[i * n + j]);
return 0;
}
}
return correct;
}
int main(int argc, char * argv[])
{
bool sp = 1;
float *SM, *SN, *SKP, *SCP;
double *DM, *DN, *DKP, *DCP;
hipEvent_t start, stop;
float elapsedTime;
unsigned int width;
width = WIDTH;
//create number of blocks and number of threads
int Thr = 128;
dim3 block(Thr, 1, 1);
dim3 grid(((width*width)+ Thr - 1) / Thr, 1, 1);
if (argc != 5)
{
/* We print argv[0] assuming it is the program name */
printf("Wrong parameters. Please use the following format for running.\n");
printf(" Usage: %s %s %s %s %s", argv[0], "[matrix_size]", "[single|double]", "[divide_val]", "[num_threads]\n");
exit(EXIT_FAILURE);
} else {
width = atoi(argv[1]);
sp = atoi(argv[2]);
block.x = atoi(argv[4]);
grid.x = ((width*width)/atoi(argv[3]) + block.x - 1) / block.x;
if(atoi(argv[2])!=0)
sp = SINGLE_PRECISION;
else
sp = DOUBLE_PRECISION;
}
//for using MatrixMul_Kernel_Tiled_SM kernel
//block.x = TILE_WIDTH; block.y=TILE_WIDTH;
//grid.x = WIDTH/TILE_WIDTH; grid.y = WIDTH/TILE_WIDTH;
//initialize host memory
if(sp==SINGLE_PRECISION)
{
SM = GenMatrix<float>(width, width);
//PrintMatrix(M, width, width);
SN = GenMatrix<float>(width, width);
//PrintMatrix(N, width, width);
SKP = GenMatrix<float>(width, width);
SCP = GenMatrix<float>(width, width);
//initialize device memory
Init_Cuda<float>(SM, SN, width, width, SINGLE_PRECISION);
}
else
{
DM = GenMatrix<double>(width, width);
//PrintMatrix(M, width, width);
DN = GenMatrix<double>(width, width);
//PrintMatrix(N, width, width);
DKP = GenMatrix<double>(width, width);
DCP = GenMatrix<double>(width, width);
//initialize device memory
Init_Cuda<double>(DM, DN, width, width, DOUBLE_PRECISION);
}
//create cudaEvent start and stop to record elapsed time
hipEventCreate(&start);
hipEventCreate(&stop);
//record start time to start event
hipEventRecord(start, 0);
//launch kernel
if(sp==SINGLE_PRECISION)
{
MatrixAddKernel<float><<<grid, block>>>(SPd, SMd, SNd, width);
}
else
{
MatrixAddKernel<double><<<grid, block>>>(DPd, DMd, DNd, width);
}
//record start time to stop event
hipEventRecord(stop, 0);
//synchronize the stop event
hipEventSynchronize(stop);
//calculate the elapsed time
hipEventElapsedTime(&elapsedTime, start, stop);
//destroy the start and stop event
hipEventDestroy(start);
hipEventDestroy(stop);
//copy data from device memory to host memory
if(sp==SINGLE_PRECISION)
hipMemcpy(SKP, SPd, width*width*sizeof(float), hipMemcpyDeviceToHost);
else
hipMemcpy(DKP, DPd, width*width*sizeof(double), hipMemcpyDeviceToHost);
//PrintMatrix(P, width, width);
//print runtime
printf("[ %s ][ %4dx%4d ][ %10d blocks ][ %5d threads ]\t>\t[ %7.3f (ms) ]\n", ((sp==SINGLE_PRECISION)?"Single Precision":"Double Precision"), width, width, grid.x, block.x, elapsedTime);
#if (CHECK==1)
if(sp==SINGLE_PRECISION)
{
/*printf("M >>>>>>>>>>>>>>>>>>>>>>>>>>\n");
PrintMatrix<float>(SM, width, width);
printf("N >>>>>>>>>>>>>>>>>>>>>>>>>>\n");
PrintMatrix<float>(SN, width, width);
printf("KP >>>>>>>>>>>>>>>>>>>>>>>>>>\n");
PrintMatrix<float>(SKP, width, width);
*/
MatrixAdd<float>(SCP, SM, SN, width);
//printf("CP >>>>>>>>>>>>>>>>>>>>>>>>>>\n");
//PrintMatrix<float>(CP, width, width);
if(Check<float>(SKP, SCP, width))
printf("We do it.\n");
else
printf("Something is wrong.\n");
}
else
{
/*printf("M >>>>>>>>>>>>>>>>>>>>>>>>>>\n");
PrintMatrix<double>(DM, width, width);
printf("N >>>>>>>>>>>>>>>>>>>>>>>>>>\n");
PrintMatrix<double>(DN, width, width);
printf("KP >>>>>>>>>>>>>>>>>>>>>>>>>>\n");
PrintMatrix<double>(DKP, width, width);
*/
MatrixAdd<double>(DCP, DM, DN, width);
//printf("CP >>>>>>>>>>>>>>>>>>>>>>>>>>\n");
//PrintMatrix<double>(DCP, width, width);
if(Check<double>(DKP, DCP, width))
printf("We do it.\n");
else
printf("Something is wrong.\n");
}
#endif
//free host memory
if(sp==SINGLE_PRECISION)
{
free(SM);
free(SN);
free(SKP);
free(SCP);
//free device memory
Free_Cuda(sp);
}
else
{
free(DM);
free(DN);
free(DKP);
free(DCP);
//free device memory
Free_Cuda(sp);
}
return 0;
} | .text
.file "addm_s_d.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z9Free_Cudab # -- Begin function _Z9Free_Cudab
.p2align 4, 0x90
.type _Z9Free_Cudab,@function
_Z9Free_Cudab: # @_Z9Free_Cudab
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
pushq %rax
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movl $SMd, %eax
movl $DMd, %ecx
testl %edi, %edi
cmovneq %rax, %rcx
movq (%rcx), %rdi
movl $SNd, %eax
movl $DNd, %ebx
cmovneq %rax, %rbx
movl $SPd, %eax
movl $DPd, %r14d
cmovneq %rax, %r14
callq hipFree
movq (%rbx), %rdi
callq hipFree
movq (%r14), %rdi
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
jmp hipFree # TAILCALL
.Lfunc_end0:
.size _Z9Free_Cudab, .Lfunc_end0-_Z9Free_Cudab
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI1_0:
.quad 0x41dfffffffc00000 # double 2147483647
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $232, %rsp
.cfi_def_cfa_offset 288
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %r14
cmpl $5, %edi
jne .LBB1_54
# %bb.1:
movq 8(%r14), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r12
movq 32(%r14), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %rbx
movl %ebx, %eax
movabsq $4294967296, %r13 # imm = 0x100000000
orq %r13, %rax
movq %rax, 192(%rsp) # 8-byte Spill
movl %r12d, %r15d
imull %r15d, %r15d
movq 24(%r14), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %rcx
movl %r15d, %eax
xorl %edx, %edx
divl %ecx
# kill: def $eax killed $eax def $rax
addl %ebx, %eax
decl %eax
xorl %edx, %edx
movq %rbx, 224(%rsp) # 8-byte Spill
divl %ebx
# kill: def $eax killed $eax def $rax
movq %rax, 216(%rsp) # 8-byte Spill
orq %rax, %r13
movq %r13, 184(%rsp) # 8-byte Spill
movq 16(%r14), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %rbx
movq %r15, 208(%rsp) # 8-byte Spill
leaq (,%r15,8), %rdi
movq %rdi, 40(%rsp) # 8-byte Spill
callq malloc
movq %rax, %r13
movq %rbx, 200(%rsp) # 8-byte Spill
testl %ebx, %ebx
movq %r12, 8(%rsp) # 8-byte Spill
movq %rax, 56(%rsp) # 8-byte Spill
je .LBB1_23
# %bb.2:
testl %r12d, %r12d
je .LBB1_7
# %bb.3: # %.preheader.i.preheader
movl 8(%rsp), %r14d # 4-byte Reload
xorl %ebp, %ebp
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB1_4: # %.preheader.i
# =>This Loop Header: Depth=1
# Child Loop BB1_5 Depth 2
movl %ebp, %ebx
movq %r14, %r15
.p2align 4, 0x90
.LBB1_5: # Parent Loop BB1_4 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movsd .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
divsd %xmm1, %xmm0
cvtsd2ss %xmm0, %xmm0
movl %ebx, %eax
movss %xmm0, (%r13,%rax,4)
incl %ebx
decq %r15
jne .LBB1_5
# %bb.6: # %._crit_edge.i
# in Loop: Header=BB1_4 Depth=1
incl %r12d
movq 8(%rsp), %rax # 8-byte Reload
addl %eax, %ebp
cmpl %eax, %r12d
jne .LBB1_4
.LBB1_7: # %_Z9GenMatrixIfEPT_jj.exit
movq 40(%rsp), %rdi # 8-byte Reload
callq malloc
movq %rax, 24(%rsp) # 8-byte Spill
movq 8(%rsp), %r12 # 8-byte Reload
testl %r12d, %r12d
je .LBB1_12
# %bb.8: # %.preheader.i89.preheader
movl %r12d, %eax
movq %rax, 16(%rsp) # 8-byte Spill
xorl %ebp, %ebp
xorl %r13d, %r13d
movq 24(%rsp), %r14 # 8-byte Reload
.p2align 4, 0x90
.LBB1_9: # %.preheader.i89
# =>This Loop Header: Depth=1
# Child Loop BB1_10 Depth 2
movl %ebp, %ebx
movq 16(%rsp), %r15 # 8-byte Reload
.p2align 4, 0x90
.LBB1_10: # Parent Loop BB1_9 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movsd .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
divsd %xmm1, %xmm0
cvtsd2ss %xmm0, %xmm0
movl %ebx, %eax
movss %xmm0, (%r14,%rax,4)
incl %ebx
decq %r15
jne .LBB1_10
# %bb.11: # %._crit_edge.i95
# in Loop: Header=BB1_9 Depth=1
incl %r13d
addl %r12d, %ebp
cmpl %r12d, %r13d
jne .LBB1_9
.LBB1_12: # %_Z9GenMatrixIfEPT_jj.exit97
movq 40(%rsp), %rdi # 8-byte Reload
callq malloc
movq %rax, %rbp
testl %r12d, %r12d
je .LBB1_17
# %bb.13: # %.preheader.i100.preheader
movl %r12d, %eax
movq %rax, 16(%rsp) # 8-byte Spill
xorl %r13d, %r13d
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB1_14: # %.preheader.i100
# =>This Loop Header: Depth=1
# Child Loop BB1_15 Depth 2
movl %r13d, %ebx
movq 16(%rsp), %r15 # 8-byte Reload
.p2align 4, 0x90
.LBB1_15: # Parent Loop BB1_14 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movsd .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
divsd %xmm1, %xmm0
cvtsd2ss %xmm0, %xmm0
movl %ebx, %eax
movss %xmm0, (%rbp,%rax,4)
incl %ebx
decq %r15
jne .LBB1_15
# %bb.16: # %._crit_edge.i106
# in Loop: Header=BB1_14 Depth=1
incl %r14d
addl %r12d, %r13d
cmpl %r12d, %r14d
jne .LBB1_14
.LBB1_17: # %_Z9GenMatrixIfEPT_jj.exit108
movq 40(%rsp), %rdi # 8-byte Reload
callq malloc
movq %rax, %r13
testl %r12d, %r12d
je .LBB1_22
# %bb.18: # %.preheader.i111.preheader
movl %r12d, %eax
movq %rax, 16(%rsp) # 8-byte Spill
xorl %r15d, %r15d
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_19: # %.preheader.i111
# =>This Loop Header: Depth=1
# Child Loop BB1_20 Depth 2
movl %r15d, %r14d
movq 16(%rsp), %r12 # 8-byte Reload
.p2align 4, 0x90
.LBB1_20: # Parent Loop BB1_19 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movsd .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
divsd %xmm1, %xmm0
cvtsd2ss %xmm0, %xmm0
movl %r14d, %eax
movss %xmm0, (%r13,%rax,4)
incl %r14d
decq %r12
jne .LBB1_20
# %bb.21: # %._crit_edge.i117
# in Loop: Header=BB1_19 Depth=1
incl %ebx
movq 8(%rsp), %r12 # 8-byte Reload
addl %r12d, %r15d
cmpl %r12d, %ebx
jne .LBB1_19
.LBB1_22: # %_Z9GenMatrixIfEPT_jj.exit119
movl %r12d, %r14d
imull %r14d, %r14d
shll $2, %r14d
movl $SMd, %edi
movq %r14, %rsi
callq hipMalloc
movq SMd(%rip), %rdi
movq 56(%rsp), %rsi # 8-byte Reload
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movl $SNd, %edi
movq %r14, %rsi
callq hipMalloc
movq SNd(%rip), %rdi
movq 24(%rsp), %r15 # 8-byte Reload
movq %r15, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movl $SPd, %ebx
movl $SPd, %edi
jmp .LBB1_44
.LBB1_23:
testl %r12d, %r12d
je .LBB1_28
# %bb.24: # %.preheader.i122.preheader
movl %r12d, %r14d
xorl %ebp, %ebp
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_25: # %.preheader.i122
# =>This Loop Header: Depth=1
# Child Loop BB1_26 Depth 2
movl %ebp, %r15d
movq %r14, %r12
.p2align 4, 0x90
.LBB1_26: # Parent Loop BB1_25 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movsd .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
divsd %xmm1, %xmm0
movl %r15d, %eax
movsd %xmm0, (%r13,%rax,8)
incl %r15d
decq %r12
jne .LBB1_26
# %bb.27: # %._crit_edge.i128
# in Loop: Header=BB1_25 Depth=1
incl %ebx
movq 8(%rsp), %r12 # 8-byte Reload
addl %r12d, %ebp
cmpl %r12d, %ebx
jne .LBB1_25
.LBB1_28: # %_Z9GenMatrixIdEPT_jj.exit
movq 40(%rsp), %rdi # 8-byte Reload
callq malloc
movq %rax, 24(%rsp) # 8-byte Spill
testl %r12d, %r12d
je .LBB1_33
# %bb.29: # %.preheader.i132.preheader
movl %r12d, %eax
movq %rax, 16(%rsp) # 8-byte Spill
xorl %ebp, %ebp
xorl %ebx, %ebx
movq 24(%rsp), %r14 # 8-byte Reload
.p2align 4, 0x90
.LBB1_30: # %.preheader.i132
# =>This Loop Header: Depth=1
# Child Loop BB1_31 Depth 2
movl %ebp, %r15d
movq 16(%rsp), %r12 # 8-byte Reload
.p2align 4, 0x90
.LBB1_31: # Parent Loop BB1_30 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movsd .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
divsd %xmm1, %xmm0
movl %r15d, %eax
movsd %xmm0, (%r14,%rax,8)
incl %r15d
decq %r12
jne .LBB1_31
# %bb.32: # %._crit_edge.i138
# in Loop: Header=BB1_30 Depth=1
incl %ebx
movq 8(%rsp), %r12 # 8-byte Reload
addl %r12d, %ebp
cmpl %r12d, %ebx
jne .LBB1_30
.LBB1_33: # %_Z9GenMatrixIdEPT_jj.exit140
movq 40(%rsp), %rdi # 8-byte Reload
callq malloc
movq %rax, %rbp
testl %r12d, %r12d
je .LBB1_38
# %bb.34: # %.preheader.i143.preheader
movl %r12d, %r14d
xorl %r13d, %r13d
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_35: # %.preheader.i143
# =>This Loop Header: Depth=1
# Child Loop BB1_36 Depth 2
movl %r13d, %r15d
movq %r14, %r12
.p2align 4, 0x90
.LBB1_36: # Parent Loop BB1_35 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movsd .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
divsd %xmm1, %xmm0
movl %r15d, %eax
movsd %xmm0, (%rbp,%rax,8)
incl %r15d
decq %r12
jne .LBB1_36
# %bb.37: # %._crit_edge.i149
# in Loop: Header=BB1_35 Depth=1
incl %ebx
movq 8(%rsp), %r12 # 8-byte Reload
addl %r12d, %r13d
cmpl %r12d, %ebx
jne .LBB1_35
.LBB1_38: # %_Z9GenMatrixIdEPT_jj.exit151
movq 40(%rsp), %rdi # 8-byte Reload
callq malloc
movq %rax, %r13
testl %r12d, %r12d
je .LBB1_43
# %bb.39: # %.preheader.i154.preheader
movl %r12d, %eax
movq %rax, 16(%rsp) # 8-byte Spill
xorl %r14d, %r14d
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_40: # %.preheader.i154
# =>This Loop Header: Depth=1
# Child Loop BB1_41 Depth 2
movl %r14d, %r15d
movq 16(%rsp), %r12 # 8-byte Reload
.p2align 4, 0x90
.LBB1_41: # Parent Loop BB1_40 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movsd .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
divsd %xmm1, %xmm0
movl %r15d, %eax
movsd %xmm0, (%r13,%rax,8)
incl %r15d
decq %r12
jne .LBB1_41
# %bb.42: # %._crit_edge.i160
# in Loop: Header=BB1_40 Depth=1
incl %ebx
movq 8(%rsp), %r12 # 8-byte Reload
addl %r12d, %r14d
cmpl %r12d, %ebx
jne .LBB1_40
.LBB1_43: # %_Z9GenMatrixIdEPT_jj.exit162
movl %r12d, %r14d
imull %r14d, %r14d
shll $3, %r14d
movl $DMd, %edi
movq %r14, %rsi
callq hipMalloc
movq DMd(%rip), %rdi
movq 56(%rsp), %rsi # 8-byte Reload
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movl $DNd, %edi
movq %r14, %rsi
callq hipMalloc
movq DNd(%rip), %rdi
movq 24(%rsp), %r15 # 8-byte Reload
movq %r15, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movl $DPd, %ebx
movl $DPd, %edi
.LBB1_44:
movq %r14, %rsi
callq hipMalloc
movq (%rbx), %rdi
xorl %esi, %esi
movq %r14, %rdx
callq hipMemset
leaq 64(%rsp), %rdi
callq hipEventCreate
leaq 48(%rsp), %rdi
callq hipEventCreate
movq 64(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 184(%rsp), %rdi # 8-byte Reload
movl $1, %esi
movq 192(%rsp), %rdx # 8-byte Reload
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
movq 200(%rsp), %rbx # 8-byte Reload
testl %ebx, %ebx
je .LBB1_47
# %bb.45:
movl $SPd, %ecx
movq %rcx, 24(%rsp) # 8-byte Spill
movl $2, %r14d
movl $.L.str.7, %ecx
movq %rcx, 8(%rsp) # 8-byte Spill
testl %eax, %eax
jne .LBB1_50
# %bb.46:
movq SPd(%rip), %rax
movq SMd(%rip), %rcx
movq SNd(%rip), %rdx
movq %rax, 136(%rsp)
movq %rcx, 128(%rsp)
movq %rdx, 120(%rsp)
movl %r12d, 36(%rsp)
leaq 136(%rsp), %rax
movq %rax, 144(%rsp)
leaq 128(%rsp), %rax
movq %rax, 152(%rsp)
leaq 120(%rsp), %rax
movq %rax, 160(%rsp)
leaq 36(%rsp), %rax
movq %rax, 168(%rsp)
leaq 104(%rsp), %rdi
leaq 88(%rsp), %rsi
leaq 80(%rsp), %rdx
leaq 72(%rsp), %rcx
callq __hipPopCallConfiguration
movq 104(%rsp), %rsi
movl 112(%rsp), %edx
movq 88(%rsp), %rcx
movl 96(%rsp), %r8d
leaq 144(%rsp), %r9
movl $_Z15MatrixAddKernelIfEvPT_PKS0_S3_j, %edi
jmp .LBB1_49
.LBB1_47:
movl $DPd, %ecx
movq %rcx, 24(%rsp) # 8-byte Spill
movl $3, %r14d
movl $.L.str.8, %ecx
movq %rcx, 8(%rsp) # 8-byte Spill
testl %eax, %eax
jne .LBB1_50
# %bb.48:
movq DPd(%rip), %rax
movq DMd(%rip), %rcx
movq DNd(%rip), %rdx
movq %rax, 136(%rsp)
movq %rcx, 128(%rsp)
movq %rdx, 120(%rsp)
movl %r12d, 36(%rsp)
leaq 136(%rsp), %rax
movq %rax, 144(%rsp)
leaq 128(%rsp), %rax
movq %rax, 152(%rsp)
leaq 120(%rsp), %rax
movq %rax, 160(%rsp)
leaq 36(%rsp), %rax
movq %rax, 168(%rsp)
leaq 104(%rsp), %rdi
leaq 88(%rsp), %rsi
leaq 80(%rsp), %rdx
leaq 72(%rsp), %rcx
callq __hipPopCallConfiguration
movq 104(%rsp), %rsi
movl 112(%rsp), %edx
movq 88(%rsp), %rcx
movl 96(%rsp), %r8d
leaq 144(%rsp), %r9
movl $_Z15MatrixAddKernelIdEvPT_PKS0_S3_j, %edi
.LBB1_49:
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_50:
movq 48(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 48(%rsp), %rdi
callq hipEventSynchronize
movq 64(%rsp), %rsi
movq 48(%rsp), %rdx
leaq 144(%rsp), %rdi
callq hipEventElapsedTime
movq 64(%rsp), %rdi
callq hipEventDestroy
movq 48(%rsp), %rdi
callq hipEventDestroy
movl 208(%rsp), %edx # 4-byte Reload
movq 24(%rsp), %rax # 8-byte Reload
movq (%rax), %rsi
movl %r14d, %ecx
shlq %cl, %rdx
movq %rbp, %rdi
movl $2, %ecx
callq hipMemcpy
movss 144(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.6, %edi
movq 8(%rsp), %rsi # 8-byte Reload
movl %r12d, %edx
movl %r12d, %ecx
movq 216(%rsp), %r8 # 8-byte Reload
# kill: def $r8d killed $r8d killed $r8
movq 224(%rsp), %r9 # 8-byte Reload
# kill: def $r9d killed $r9d killed $r9
movb $1, %al
callq printf
testl %ebx, %ebx
je .LBB1_52
# %bb.51:
movl $SPd, %r12d
movl $SNd, %r14d
movl $SMd, %ebx
jmp .LBB1_53
.LBB1_52:
movl $DPd, %r12d
movl $DNd, %r14d
movl $DMd, %ebx
.LBB1_53:
movq 56(%rsp), %rdi # 8-byte Reload
callq free
movq %r15, %rdi
callq free
movq %rbp, %rdi
callq free
movq %r13, %rdi
callq free
movq (%rbx), %rdi
callq hipFree
movq (%r14), %rdi
callq hipFree
movq (%r12), %rdi
callq hipFree
xorl %eax, %eax
addq $232, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_54:
.cfi_def_cfa_offset 288
movl $.Lstr, %edi
callq puts@PLT
movq (%r14), %rsi
movl $.L.str.1, %edi
movl $.L.str.2, %edx
movl $.L.str.3, %ecx
movl $.L.str.4, %r8d
movl $.L.str.5, %r9d
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.section .text._Z30__device_stub__MatrixAddKernelIfEvPT_PKS0_S3_j,"axG",@progbits,_Z30__device_stub__MatrixAddKernelIfEvPT_PKS0_S3_j,comdat
.weak _Z30__device_stub__MatrixAddKernelIfEvPT_PKS0_S3_j # -- Begin function _Z30__device_stub__MatrixAddKernelIfEvPT_PKS0_S3_j
.p2align 4, 0x90
.type _Z30__device_stub__MatrixAddKernelIfEvPT_PKS0_S3_j,@function
_Z30__device_stub__MatrixAddKernelIfEvPT_PKS0_S3_j: # @_Z30__device_stub__MatrixAddKernelIfEvPT_PKS0_S3_j
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z15MatrixAddKernelIfEvPT_PKS0_S3_j, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size _Z30__device_stub__MatrixAddKernelIfEvPT_PKS0_S3_j, .Lfunc_end2-_Z30__device_stub__MatrixAddKernelIfEvPT_PKS0_S3_j
.cfi_endproc
# -- End function
.section .text._Z30__device_stub__MatrixAddKernelIdEvPT_PKS0_S3_j,"axG",@progbits,_Z30__device_stub__MatrixAddKernelIdEvPT_PKS0_S3_j,comdat
.weak _Z30__device_stub__MatrixAddKernelIdEvPT_PKS0_S3_j # -- Begin function _Z30__device_stub__MatrixAddKernelIdEvPT_PKS0_S3_j
.p2align 4, 0x90
.type _Z30__device_stub__MatrixAddKernelIdEvPT_PKS0_S3_j,@function
_Z30__device_stub__MatrixAddKernelIdEvPT_PKS0_S3_j: # @_Z30__device_stub__MatrixAddKernelIdEvPT_PKS0_S3_j
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z15MatrixAddKernelIdEvPT_PKS0_S3_j, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end3:
.size _Z30__device_stub__MatrixAddKernelIdEvPT_PKS0_S3_j, .Lfunc_end3-_Z30__device_stub__MatrixAddKernelIdEvPT_PKS0_S3_j
.cfi_endproc
# -- End function
.text
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z15MatrixAddKernelIfEvPT_PKS0_S3_j, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z15MatrixAddKernelIdEvPT_PKS0_S3_j, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type SMd,@object # @SMd
.bss
.globl SMd
.p2align 3, 0x0
SMd:
.quad 0
.size SMd, 8
.type SNd,@object # @SNd
.globl SNd
.p2align 3, 0x0
SNd:
.quad 0
.size SNd, 8
.type SPd,@object # @SPd
.globl SPd
.p2align 3, 0x0
SPd:
.quad 0
.size SPd, 8
.type DMd,@object # @DMd
.globl DMd
.p2align 3, 0x0
DMd:
.quad 0
.size DMd, 8
.type DNd,@object # @DNd
.globl DNd
.p2align 3, 0x0
DNd:
.quad 0
.size DNd, 8
.type DPd,@object # @DPd
.globl DPd
.p2align 3, 0x0
DPd:
.quad 0
.size DPd, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz " Usage: %s %s %s %s %s"
.size .L.str.1, 23
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "[matrix_size]"
.size .L.str.2, 14
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "[single|double]"
.size .L.str.3, 16
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "[divide_val]"
.size .L.str.4, 13
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "[num_threads]\n"
.size .L.str.5, 15
.type _Z15MatrixAddKernelIfEvPT_PKS0_S3_j,@object # @_Z15MatrixAddKernelIfEvPT_PKS0_S3_j
.section .rodata._Z15MatrixAddKernelIfEvPT_PKS0_S3_j,"aG",@progbits,_Z15MatrixAddKernelIfEvPT_PKS0_S3_j,comdat
.weak _Z15MatrixAddKernelIfEvPT_PKS0_S3_j
.p2align 3, 0x0
_Z15MatrixAddKernelIfEvPT_PKS0_S3_j:
.quad _Z30__device_stub__MatrixAddKernelIfEvPT_PKS0_S3_j
.size _Z15MatrixAddKernelIfEvPT_PKS0_S3_j, 8
.type _Z15MatrixAddKernelIdEvPT_PKS0_S3_j,@object # @_Z15MatrixAddKernelIdEvPT_PKS0_S3_j
.section .rodata._Z15MatrixAddKernelIdEvPT_PKS0_S3_j,"aG",@progbits,_Z15MatrixAddKernelIdEvPT_PKS0_S3_j,comdat
.weak _Z15MatrixAddKernelIdEvPT_PKS0_S3_j
.p2align 3, 0x0
_Z15MatrixAddKernelIdEvPT_PKS0_S3_j:
.quad _Z30__device_stub__MatrixAddKernelIdEvPT_PKS0_S3_j
.size _Z15MatrixAddKernelIdEvPT_PKS0_S3_j, 8
.type .L.str.6,@object # @.str.6
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.6:
.asciz "[ %s ][ %4dx%4d ][ %10d blocks ][ %5d threads ]\t>\t[ %7.3f (ms) ]\n"
.size .L.str.6, 66
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "Single Precision"
.size .L.str.7, 17
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "Double Precision"
.size .L.str.8, 17
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z15MatrixAddKernelIfEvPT_PKS0_S3_j"
.size .L__unnamed_1, 36
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z15MatrixAddKernelIdEvPT_PKS0_S3_j"
.size .L__unnamed_2, 36
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Wrong parameters. Please use the following format for running."
.size .Lstr, 63
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z30__device_stub__MatrixAddKernelIfEvPT_PKS0_S3_j
.addrsig_sym _Z30__device_stub__MatrixAddKernelIdEvPT_PKS0_S3_j
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym SMd
.addrsig_sym SNd
.addrsig_sym SPd
.addrsig_sym DMd
.addrsig_sym DNd
.addrsig_sym DPd
.addrsig_sym _Z15MatrixAddKernelIfEvPT_PKS0_S3_j
.addrsig_sym _Z15MatrixAddKernelIdEvPT_PKS0_S3_j
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z15MatrixAddKernelIdEvPT_PKS0_S3_j
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC UR4, c[0x0][0x178] ; /* 0x00005e0000047ab9 */
/* 0x000fe40000000800 */
/*0030*/ UIMAD UR4, UR4, UR4, URZ ; /* 0x00000004040472a4 */
/* 0x000fe2000f8e023f */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ ISETP.GE.U32.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x000fda000bf06070 */
/*0070*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0080*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*0090*/ HFMA2.MMA R9, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff097435 */
/* 0x001fd400000001ff */
/*00a0*/ IMAD.WIDE.U32 R2, R0, R9, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fc800078e0009 */
/*00b0*/ IMAD.WIDE.U32 R4, R0.reuse, R9.reuse, c[0x0][0x170] ; /* 0x00005c0000047625 */
/* 0x0c0fe400078e0009 */
/*00c0*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000602027981 */
/* 0x000ea8000c1e1b00 */
/*00d0*/ LDG.E.64 R4, [R4.64] ; /* 0x0000000604047981 */
/* 0x000ea2000c1e1b00 */
/*00e0*/ IMAD.WIDE.U32 R8, R0, R9, c[0x0][0x160] ; /* 0x0000580000087625 */
/* 0x000fe200078e0009 */
/*00f0*/ MOV R11, c[0x0][0xc] ; /* 0x00000300000b7a02 */
/* 0x000fca0000000f00 */
/*0100*/ IMAD R0, R11, c[0x0][0x0], R0 ; /* 0x000000000b007a24 */
/* 0x000fca00078e0200 */
/*0110*/ ISETP.GE.U32.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x000fe2000bf06070 */
/*0120*/ DADD R6, R4, R2 ; /* 0x0000000004067229 */
/* 0x004e0e0000000002 */
/*0130*/ STG.E.64 [R8.64], R6 ; /* 0x0000000608007986 */
/* 0x0011ea000c101b06 */
/*0140*/ @!P0 BRA 0x90 ; /* 0xffffff4000008947 */
/* 0x000fea000383ffff */
/*0150*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0160*/ BRA 0x160; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z15MatrixAddKernelIfEvPT_PKS0_S3_j
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC UR4, c[0x0][0x178] ; /* 0x00005e0000047ab9 */
/* 0x000fe40000000800 */
/*0030*/ UIMAD UR4, UR4, UR4, URZ ; /* 0x00000004040472a4 */
/* 0x000fe2000f8e023f */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ ISETP.GE.U32.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x000fda000bf06070 */
/*0070*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0080*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*0090*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x001fd400000001ff */
/*00a0*/ IMAD.WIDE.U32 R2, R0, R7, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fc800078e0007 */
/*00b0*/ IMAD.WIDE.U32 R4, R0.reuse, R7.reuse, c[0x0][0x170] ; /* 0x00005c0000047625 */
/* 0x0c0fe400078e0007 */
/*00c0*/ LDG.E R3, [R2.64] ; /* 0x0000000602037981 */
/* 0x000ea8000c1e1900 */
/*00d0*/ LDG.E R4, [R4.64] ; /* 0x0000000604047981 */
/* 0x000ea2000c1e1900 */
/*00e0*/ IMAD.WIDE.U32 R6, R0, R7, c[0x0][0x160] ; /* 0x0000580000067625 */
/* 0x000fe200078e0007 */
/*00f0*/ MOV R11, c[0x0][0xc] ; /* 0x00000300000b7a02 */
/* 0x000fca0000000f00 */
/*0100*/ IMAD R0, R11, c[0x0][0x0], R0 ; /* 0x000000000b007a24 */
/* 0x000fca00078e0200 */
/*0110*/ ISETP.GE.U32.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x000fe2000bf06070 */
/*0120*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */
/* 0x004fca0000000000 */
/*0130*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0001ee000c101906 */
/*0140*/ @!P0 BRA 0x90 ; /* 0xffffff4000008947 */
/* 0x000fea000383ffff */
/*0150*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0160*/ BRA 0x160; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.section .text._Z15MatrixAddKernelIfEvPT_PKS0_S3_j,"axG",@progbits,_Z15MatrixAddKernelIfEvPT_PKS0_S3_j,comdat
.protected _Z15MatrixAddKernelIfEvPT_PKS0_S3_j
.globl _Z15MatrixAddKernelIfEvPT_PKS0_S3_j
.p2align 8
.type _Z15MatrixAddKernelIfEvPT_PKS0_S3_j,@function
_Z15MatrixAddKernelIfEvPT_PKS0_S3_j:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x2c
s_load_b32 s8, s[0:1], 0x18
s_add_u32 s2, s0, 32
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s9, s4, 0xffff
s_mul_i32 s8, s8, s8
v_mad_u64_u32 v[1:2], null, s15, s9, v[0:1]
s_mov_b32 s4, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_u32_e64 s8, v1
s_cbranch_execz .LBB0_3
s_load_b32 s10, s[2:3], 0x0
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x10
v_mov_b32_e32 v2, 0
s_waitcnt lgkmcnt(0)
s_mul_i32 s1, s10, s9
s_mov_b32 s9, 0
.p2align 6
.LBB0_2:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[3:4], 2, v[1:2]
v_add_nc_u32_e32 v1, s1, v1
v_add_co_u32 v5, vcc_lo, s6, v3
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v6, vcc_lo, s7, v4, vcc_lo
v_add_co_u32 v7, vcc_lo, s2, v3
v_add_co_ci_u32_e32 v8, vcc_lo, s3, v4, vcc_lo
v_cmp_le_u32_e32 vcc_lo, s8, v1
global_load_b32 v0, v[5:6], off
global_load_b32 v5, v[7:8], off
v_add_co_u32 v3, s0, s4, v3
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v4, s0, s5, v4, s0
s_or_b32 s9, vcc_lo, s9
s_waitcnt vmcnt(0)
v_add_f32_e32 v0, v0, v5
global_store_b32 v[3:4], v0, off
s_and_not1_b32 exec_lo, exec_lo, s9
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z15MatrixAddKernelIfEvPT_PKS0_S3_j
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.section .text._Z15MatrixAddKernelIfEvPT_PKS0_S3_j,"axG",@progbits,_Z15MatrixAddKernelIfEvPT_PKS0_S3_j,comdat
.Lfunc_end0:
.size _Z15MatrixAddKernelIfEvPT_PKS0_S3_j, .Lfunc_end0-_Z15MatrixAddKernelIfEvPT_PKS0_S3_j
.section .AMDGPU.csdata,"",@progbits
.section .text._Z15MatrixAddKernelIdEvPT_PKS0_S3_j,"axG",@progbits,_Z15MatrixAddKernelIdEvPT_PKS0_S3_j,comdat
.protected _Z15MatrixAddKernelIdEvPT_PKS0_S3_j
.globl _Z15MatrixAddKernelIdEvPT_PKS0_S3_j
.p2align 8
.type _Z15MatrixAddKernelIdEvPT_PKS0_S3_j,@function
_Z15MatrixAddKernelIdEvPT_PKS0_S3_j:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x2c
s_load_b32 s8, s[0:1], 0x18
s_add_u32 s2, s0, 32
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s9, s4, 0xffff
s_mul_i32 s8, s8, s8
v_mad_u64_u32 v[1:2], null, s15, s9, v[0:1]
s_mov_b32 s4, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_u32_e64 s8, v1
s_cbranch_execz .LBB1_3
s_load_b32 s10, s[2:3], 0x0
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x10
v_mov_b32_e32 v2, 0
s_waitcnt lgkmcnt(0)
s_mul_i32 s1, s10, s9
s_mov_b32 s9, 0
.p2align 6
.LBB1_2:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[3:4], 3, v[1:2]
v_add_nc_u32_e32 v1, s1, v1
v_add_co_u32 v5, vcc_lo, s6, v3
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v6, vcc_lo, s7, v4, vcc_lo
v_add_co_u32 v7, vcc_lo, s2, v3
v_add_co_ci_u32_e32 v8, vcc_lo, s3, v4, vcc_lo
v_cmp_le_u32_e32 vcc_lo, s8, v1
global_load_b64 v[5:6], v[5:6], off
global_load_b64 v[7:8], v[7:8], off
v_add_co_u32 v3, s0, s4, v3
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v4, s0, s5, v4, s0
s_or_b32 s9, vcc_lo, s9
s_waitcnt vmcnt(0)
v_add_f64 v[5:6], v[5:6], v[7:8]
global_store_b64 v[3:4], v[5:6], off
s_and_not1_b32 exec_lo, exec_lo, s9
s_cbranch_execnz .LBB1_2
.LBB1_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z15MatrixAddKernelIdEvPT_PKS0_S3_j
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.section .text._Z15MatrixAddKernelIdEvPT_PKS0_S3_j,"axG",@progbits,_Z15MatrixAddKernelIdEvPT_PKS0_S3_j,comdat
.Lfunc_end1:
.size _Z15MatrixAddKernelIdEvPT_PKS0_S3_j, .Lfunc_end1-_Z15MatrixAddKernelIdEvPT_PKS0_S3_j
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z15MatrixAddKernelIfEvPT_PKS0_S3_j
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z15MatrixAddKernelIfEvPT_PKS0_S3_j.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z15MatrixAddKernelIdEvPT_PKS0_S3_j
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z15MatrixAddKernelIdEvPT_PKS0_S3_j.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0010e5b9_00000000-6_addm_s_d.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.section .text._Z15MatrixAddKernelIfEvPT_PKS0_S3_j,"axG",@progbits,_Z15MatrixAddKernelIfEvPT_PKS0_S3_j,comdat
.weak _Z15MatrixAddKernelIfEvPT_PKS0_S3_j
.type _Z15MatrixAddKernelIfEvPT_PKS0_S3_j, @function
_Z15MatrixAddKernelIfEvPT_PKS0_S3_j:
.LFB4011:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
movq %rdi, 8(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 4(%rsp)
leaq 8(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 24(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L5
.L1:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L6
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L5:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z15MatrixAddKernelIfEvPT_PKS0_S3_j(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L1
.L6:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4011:
.size _Z15MatrixAddKernelIfEvPT_PKS0_S3_j, .-_Z15MatrixAddKernelIfEvPT_PKS0_S3_j
.section .text._Z15MatrixAddKernelIdEvPT_PKS0_S3_j,"axG",@progbits,_Z15MatrixAddKernelIdEvPT_PKS0_S3_j,comdat
.weak _Z15MatrixAddKernelIdEvPT_PKS0_S3_j
.type _Z15MatrixAddKernelIdEvPT_PKS0_S3_j, @function
_Z15MatrixAddKernelIdEvPT_PKS0_S3_j:
.LFB4012:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
movq %rdi, 8(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 4(%rsp)
leaq 8(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 24(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z15MatrixAddKernelIdEvPT_PKS0_S3_j(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4012:
.size _Z15MatrixAddKernelIdEvPT_PKS0_S3_j, .-_Z15MatrixAddKernelIdEvPT_PKS0_S3_j
.text
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3680:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3680:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z9Free_Cudab
.type _Z9Free_Cudab, @function
_Z9Free_Cudab:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
testb %dil, %dil
je .L16
movq SMd(%rip), %rdi
call cudaFree@PLT
movq SNd(%rip), %rdi
call cudaFree@PLT
movq SPd(%rip), %rdi
call cudaFree@PLT
.L15:
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
movq DMd(%rip), %rdi
call cudaFree@PLT
movq DNd(%rip), %rdi
call cudaFree@PLT
movq DPd(%rip), %rdi
call cudaFree@PLT
jmp .L15
.cfi_endproc
.LFE3672:
.size _Z9Free_Cudab, .-_Z9Free_Cudab
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z15MatrixAddKernelIdEvPT_PKS0_S3_j"
.align 8
.LC1:
.string "_Z15MatrixAddKernelIfEvPT_PKS0_S3_j"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3707:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z15MatrixAddKernelIdEvPT_PKS0_S3_j(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z15MatrixAddKernelIfEvPT_PKS0_S3_j(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3707:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .text._Z9GenMatrixIfEPT_jj,"axG",@progbits,_Z9GenMatrixIfEPT_jj,comdat
.weak _Z9GenMatrixIfEPT_jj
.type _Z9GenMatrixIfEPT_jj, @function
_Z9GenMatrixIfEPT_jj:
.LFB4007:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $8, %rsp
.cfi_def_cfa_offset 64
movl %edi, %r13d
movl %esi, %r15d
imull %esi, %edi
salq $3, %rdi
call malloc@PLT
movq %rax, %r12
movl %r13d, %ebp
movl $0, %r14d
testl %r15d, %r15d
jne .L22
.L21:
movq %r12, %rax
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L24:
.cfi_restore_state
call rand@PLT
movl %eax, %edx
movl %ebx, %eax
pxor %xmm0, %xmm0
cvtsi2sdl %edx, %xmm0
divsd .LC2(%rip), %xmm0
cvtsd2ss %xmm0, %xmm0
movss %xmm0, (%r12,%rax,4)
addl $1, %ebx
cmpl %ebp, %ebx
jne .L24
.L26:
addl $1, %r14d
addl %r13d, %ebp
cmpl %r14d, %r15d
je .L21
.L22:
movl %ebp, %ebx
subl %r13d, %ebx
testl %r13d, %r13d
jne .L24
jmp .L26
.cfi_endproc
.LFE4007:
.size _Z9GenMatrixIfEPT_jj, .-_Z9GenMatrixIfEPT_jj
.section .text._Z9Init_CudaIfEvPT_S1_jjb,"axG",@progbits,_Z9Init_CudaIfEvPT_S1_jjb,comdat
.weak _Z9Init_CudaIfEvPT_S1_jjb
.type _Z9Init_CudaIfEvPT_S1_jjb, @function
_Z9Init_CudaIfEvPT_S1_jjb:
.LFB4008:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movq %rdi, %r12
movq %rsi, %rbp
imull %ecx, %edx
leal 0(,%rdx,4), %ebx
testb %r8b, %r8b
je .L33
movl %ebx, %ebx
movq %rbx, %rsi
leaq SMd(%rip), %rdi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r12, %rsi
movq SMd(%rip), %rdi
call cudaMemcpy@PLT
movq %rbx, %rsi
leaq SNd(%rip), %rdi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %rbp, %rsi
movq SNd(%rip), %rdi
call cudaMemcpy@PLT
movq %rbx, %rsi
leaq SPd(%rip), %rdi
call cudaMalloc@PLT
movq %rbx, %rdx
movl $0, %esi
movq SPd(%rip), %rdi
call cudaMemset@PLT
.L32:
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L33:
.cfi_restore_state
movl %ebx, %ebx
movq %rbx, %rsi
leaq DMd(%rip), %rdi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r12, %rsi
movq DMd(%rip), %rdi
call cudaMemcpy@PLT
movq %rbx, %rsi
leaq DNd(%rip), %rdi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %rbp, %rsi
movq DNd(%rip), %rdi
call cudaMemcpy@PLT
movq %rbx, %rsi
leaq DPd(%rip), %rdi
call cudaMalloc@PLT
movq %rbx, %rdx
movl $0, %esi
movq DPd(%rip), %rdi
call cudaMemset@PLT
jmp .L32
.cfi_endproc
.LFE4008:
.size _Z9Init_CudaIfEvPT_S1_jjb, .-_Z9Init_CudaIfEvPT_S1_jjb
.section .text._Z9GenMatrixIdEPT_jj,"axG",@progbits,_Z9GenMatrixIdEPT_jj,comdat
.weak _Z9GenMatrixIdEPT_jj
.type _Z9GenMatrixIdEPT_jj, @function
_Z9GenMatrixIdEPT_jj:
.LFB4009:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $8, %rsp
.cfi_def_cfa_offset 64
movl %edi, %r13d
movl %esi, %r15d
imull %esi, %edi
salq $3, %rdi
call malloc@PLT
movq %rax, %r12
movl %r13d, %ebp
movl $0, %r14d
testl %r15d, %r15d
jne .L37
.L36:
movq %r12, %rax
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L39:
.cfi_restore_state
call rand@PLT
movl %eax, %edx
movl %ebx, %eax
pxor %xmm0, %xmm0
cvtsi2sdl %edx, %xmm0
divsd .LC2(%rip), %xmm0
movsd %xmm0, (%r12,%rax,8)
addl $1, %ebx
cmpl %ebp, %ebx
jne .L39
.L41:
addl $1, %r14d
addl %r13d, %ebp
cmpl %r14d, %r15d
je .L36
.L37:
movl %ebp, %ebx
subl %r13d, %ebx
testl %r13d, %r13d
jne .L39
jmp .L41
.cfi_endproc
.LFE4009:
.size _Z9GenMatrixIdEPT_jj, .-_Z9GenMatrixIdEPT_jj
.section .text._Z9Init_CudaIdEvPT_S1_jjb,"axG",@progbits,_Z9Init_CudaIdEvPT_S1_jjb,comdat
.weak _Z9Init_CudaIdEvPT_S1_jjb
.type _Z9Init_CudaIdEvPT_S1_jjb, @function
_Z9Init_CudaIdEvPT_S1_jjb:
.LFB4010:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movq %rdi, %r12
movq %rsi, %rbp
imull %ecx, %edx
leal 0(,%rdx,8), %ebx
testb %r8b, %r8b
je .L48
movl %ebx, %ebx
movq %rbx, %rsi
leaq SMd(%rip), %rdi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r12, %rsi
movq SMd(%rip), %rdi
call cudaMemcpy@PLT
movq %rbx, %rsi
leaq SNd(%rip), %rdi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %rbp, %rsi
movq SNd(%rip), %rdi
call cudaMemcpy@PLT
movq %rbx, %rsi
leaq SPd(%rip), %rdi
call cudaMalloc@PLT
movq %rbx, %rdx
movl $0, %esi
movq SPd(%rip), %rdi
call cudaMemset@PLT
.L47:
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L48:
.cfi_restore_state
movl %ebx, %ebx
movq %rbx, %rsi
leaq DMd(%rip), %rdi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r12, %rsi
movq DMd(%rip), %rdi
call cudaMemcpy@PLT
movq %rbx, %rsi
leaq DNd(%rip), %rdi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %rbp, %rsi
movq DNd(%rip), %rdi
call cudaMemcpy@PLT
movq %rbx, %rsi
leaq DPd(%rip), %rdi
call cudaMalloc@PLT
movq %rbx, %rdx
movl $0, %esi
movq DPd(%rip), %rdi
call cudaMemset@PLT
jmp .L47
.cfi_endproc
.LFE4010:
.size _Z9Init_CudaIdEvPT_S1_jjb, .-_Z9Init_CudaIdEvPT_S1_jjb
.section .rodata.str1.8
.align 8
.LC3:
.string "Wrong parameters. Please use the following format for running.\n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC4:
.string "[divide_val]"
.LC5:
.string "[single|double]"
.LC6:
.string "[matrix_size]"
.LC7:
.string " Usage: %s %s %s %s %s"
.LC8:
.string "[num_threads]\n"
.LC9:
.string "Double Precision"
.section .rodata.str1.8
.align 8
.LC10:
.string "[ %s ][ %4dx%4d ][ %10d blocks ][ %5d threads ]\t>\t[ %7.3f (ms) ]\n"
.section .rodata.str1.1
.LC11:
.string "Single Precision"
.text
.globl main
.type main, @function
main:
.LFB3677:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $104, %rsp
.cfi_def_cfa_offset 160
movq %rsi, %r15
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
cmpl $5, %edi
jne .L64
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r12
movl %eax, %ebx
movq 16(%r15), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq 32(%r15), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r13
movl %r12d, %ebp
imull %r12d, %ebp
movq 24(%r15), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %rcx
movl %ebp, %eax
movl $0, %edx
divl %ecx
leal -1(%rax,%r13), %eax
movl $0, %edx
divl %r13d
movl %eax, %r14d
movq 16(%r15), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
testl %eax, %eax
je .L53
movl %r12d, %esi
movl %r12d, %edi
call _Z9GenMatrixIfEPT_jj
movq %rax, %r15
movl %r12d, %esi
movl %r12d, %edi
call _Z9GenMatrixIfEPT_jj
movq %rax, 8(%rsp)
movl %r12d, %esi
movl %r12d, %edi
call _Z9GenMatrixIfEPT_jj
movq %rax, 16(%rsp)
movl %r12d, %esi
movl %r12d, %edi
call _Z9GenMatrixIfEPT_jj
movq %rax, 24(%rsp)
movl $1, %r8d
movl %r12d, %ecx
movl %r12d, %edx
movq 8(%rsp), %rsi
movq %r15, %rdi
call _Z9Init_CudaIfEvPT_S1_jjb
leaq 48(%rsp), %rdi
call cudaEventCreate@PLT
leaq 56(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 48(%rsp), %rdi
call cudaEventRecord@PLT
movl %r14d, 76(%rsp)
movl %r13d, 64(%rsp)
movl 72(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 64(%rsp), %rdx
movq 76(%rsp), %rdi
movl 84(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L65
.L54:
movl $0, %esi
movq 56(%rsp), %rdi
call cudaEventRecord@PLT
movq 56(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 44(%rsp), %rdi
movq 56(%rsp), %rdx
movq 48(%rsp), %rsi
call cudaEventElapsedTime@PLT
movq 48(%rsp), %rdi
call cudaEventDestroy@PLT
movq 56(%rsp), %rdi
call cudaEventDestroy@PLT
movl %ebp, %edx
salq $2, %rdx
movl $2, %ecx
movq SPd(%rip), %rsi
movq 16(%rsp), %rbp
movq %rbp, %rdi
call cudaMemcpy@PLT
pxor %xmm0, %xmm0
cvtss2sd 44(%rsp), %xmm0
subq $8, %rsp
.cfi_def_cfa_offset 168
pushq %r13
.cfi_def_cfa_offset 176
movl %r14d, %r9d
movl %r12d, %r8d
movl %ebx, %ecx
leaq .LC11(%rip), %rdx
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq %r15, %rdi
call free@PLT
movq 24(%rsp), %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq 40(%rsp), %rdi
call free@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
movl $1, %edi
call _Z9Free_Cudab
jmp .L58
.L64:
leaq .LC3(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
subq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 168
leaq .LC8(%rip), %rax
pushq %rax
.cfi_def_cfa_offset 176
leaq .LC4(%rip), %r9
leaq .LC5(%rip), %r8
leaq .LC6(%rip), %rcx
movq (%r15), %rdx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L53:
.cfi_restore_state
movl %r12d, %esi
movl %r12d, %edi
call _Z9GenMatrixIdEPT_jj
movq %rax, %r15
movl %r12d, %esi
movl %r12d, %edi
call _Z9GenMatrixIdEPT_jj
movq %rax, 8(%rsp)
movl %r12d, %esi
movl %r12d, %edi
call _Z9GenMatrixIdEPT_jj
movq %rax, 16(%rsp)
movl %r12d, %esi
movl %r12d, %edi
call _Z9GenMatrixIdEPT_jj
movq %rax, 24(%rsp)
movl $0, %r8d
movl %r12d, %ecx
movl %r12d, %edx
movq 8(%rsp), %rsi
movq %r15, %rdi
call _Z9Init_CudaIdEvPT_S1_jjb
leaq 48(%rsp), %rdi
call cudaEventCreate@PLT
leaq 56(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 48(%rsp), %rdi
call cudaEventRecord@PLT
movl %r14d, 76(%rsp)
movl %r13d, 64(%rsp)
movl 72(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 64(%rsp), %rdx
movq 76(%rsp), %rdi
movl 84(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L66
.L56:
movl $0, %esi
movq 56(%rsp), %rdi
call cudaEventRecord@PLT
movq 56(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 44(%rsp), %rdi
movq 56(%rsp), %rdx
movq 48(%rsp), %rsi
call cudaEventElapsedTime@PLT
movq 48(%rsp), %rdi
call cudaEventDestroy@PLT
movq 56(%rsp), %rdi
call cudaEventDestroy@PLT
movl %ebp, %edx
salq $3, %rdx
movl $2, %ecx
movq DPd(%rip), %rsi
movq 16(%rsp), %rbp
movq %rbp, %rdi
call cudaMemcpy@PLT
pxor %xmm0, %xmm0
cvtss2sd 44(%rsp), %xmm0
subq $8, %rsp
.cfi_def_cfa_offset 168
pushq %r13
.cfi_def_cfa_offset 176
movl %r14d, %r9d
movl %r12d, %r8d
movl %ebx, %ecx
leaq .LC9(%rip), %rdx
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq %r15, %rdi
call free@PLT
movq 24(%rsp), %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq 40(%rsp), %rdi
call free@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
movl $0, %edi
call _Z9Free_Cudab
.L58:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L67
movl $0, %eax
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L65:
.cfi_restore_state
movl %r12d, %ecx
movq SNd(%rip), %rdx
movq SMd(%rip), %rsi
movq SPd(%rip), %rdi
call _Z15MatrixAddKernelIfEvPT_PKS0_S3_j
jmp .L54
.L66:
movl %r12d, %ecx
movq DNd(%rip), %rdx
movq DMd(%rip), %rsi
movq DPd(%rip), %rdi
call _Z15MatrixAddKernelIdEvPT_PKS0_S3_j
jmp .L56
.L67:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3677:
.size main, .-main
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl DPd
.bss
.align 8
.type DPd, @object
.size DPd, 8
DPd:
.zero 8
.globl DNd
.align 8
.type DNd, @object
.size DNd, 8
DNd:
.zero 8
.globl DMd
.align 8
.type DMd, @object
.size DMd, 8
DMd:
.zero 8
.globl SPd
.align 8
.type SPd, @object
.size SPd, 8
SPd:
.zero 8
.globl SNd
.align 8
.type SNd, @object
.size SNd, 8
SNd:
.zero 8
.globl SMd
.align 8
.type SMd, @object
.size SMd, 8
SMd:
.zero 8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC2:
.long -4194304
.long 1105199103
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "addm_s_d.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z9Free_Cudab # -- Begin function _Z9Free_Cudab
.p2align 4, 0x90
.type _Z9Free_Cudab,@function
_Z9Free_Cudab: # @_Z9Free_Cudab
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
pushq %rax
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movl $SMd, %eax
movl $DMd, %ecx
testl %edi, %edi
cmovneq %rax, %rcx
movq (%rcx), %rdi
movl $SNd, %eax
movl $DNd, %ebx
cmovneq %rax, %rbx
movl $SPd, %eax
movl $DPd, %r14d
cmovneq %rax, %r14
callq hipFree
movq (%rbx), %rdi
callq hipFree
movq (%r14), %rdi
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
jmp hipFree # TAILCALL
.Lfunc_end0:
.size _Z9Free_Cudab, .Lfunc_end0-_Z9Free_Cudab
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI1_0:
.quad 0x41dfffffffc00000 # double 2147483647
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $232, %rsp
.cfi_def_cfa_offset 288
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %r14
cmpl $5, %edi
jne .LBB1_54
# %bb.1:
movq 8(%r14), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r12
movq 32(%r14), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %rbx
movl %ebx, %eax
movabsq $4294967296, %r13 # imm = 0x100000000
orq %r13, %rax
movq %rax, 192(%rsp) # 8-byte Spill
movl %r12d, %r15d
imull %r15d, %r15d
movq 24(%r14), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %rcx
movl %r15d, %eax
xorl %edx, %edx
divl %ecx
# kill: def $eax killed $eax def $rax
addl %ebx, %eax
decl %eax
xorl %edx, %edx
movq %rbx, 224(%rsp) # 8-byte Spill
divl %ebx
# kill: def $eax killed $eax def $rax
movq %rax, 216(%rsp) # 8-byte Spill
orq %rax, %r13
movq %r13, 184(%rsp) # 8-byte Spill
movq 16(%r14), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %rbx
movq %r15, 208(%rsp) # 8-byte Spill
leaq (,%r15,8), %rdi
movq %rdi, 40(%rsp) # 8-byte Spill
callq malloc
movq %rax, %r13
movq %rbx, 200(%rsp) # 8-byte Spill
testl %ebx, %ebx
movq %r12, 8(%rsp) # 8-byte Spill
movq %rax, 56(%rsp) # 8-byte Spill
je .LBB1_23
# %bb.2:
testl %r12d, %r12d
je .LBB1_7
# %bb.3: # %.preheader.i.preheader
movl 8(%rsp), %r14d # 4-byte Reload
xorl %ebp, %ebp
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB1_4: # %.preheader.i
# =>This Loop Header: Depth=1
# Child Loop BB1_5 Depth 2
movl %ebp, %ebx
movq %r14, %r15
.p2align 4, 0x90
.LBB1_5: # Parent Loop BB1_4 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movsd .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
divsd %xmm1, %xmm0
cvtsd2ss %xmm0, %xmm0
movl %ebx, %eax
movss %xmm0, (%r13,%rax,4)
incl %ebx
decq %r15
jne .LBB1_5
# %bb.6: # %._crit_edge.i
# in Loop: Header=BB1_4 Depth=1
incl %r12d
movq 8(%rsp), %rax # 8-byte Reload
addl %eax, %ebp
cmpl %eax, %r12d
jne .LBB1_4
.LBB1_7: # %_Z9GenMatrixIfEPT_jj.exit
movq 40(%rsp), %rdi # 8-byte Reload
callq malloc
movq %rax, 24(%rsp) # 8-byte Spill
movq 8(%rsp), %r12 # 8-byte Reload
testl %r12d, %r12d
je .LBB1_12
# %bb.8: # %.preheader.i89.preheader
movl %r12d, %eax
movq %rax, 16(%rsp) # 8-byte Spill
xorl %ebp, %ebp
xorl %r13d, %r13d
movq 24(%rsp), %r14 # 8-byte Reload
.p2align 4, 0x90
.LBB1_9: # %.preheader.i89
# =>This Loop Header: Depth=1
# Child Loop BB1_10 Depth 2
movl %ebp, %ebx
movq 16(%rsp), %r15 # 8-byte Reload
.p2align 4, 0x90
.LBB1_10: # Parent Loop BB1_9 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movsd .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
divsd %xmm1, %xmm0
cvtsd2ss %xmm0, %xmm0
movl %ebx, %eax
movss %xmm0, (%r14,%rax,4)
incl %ebx
decq %r15
jne .LBB1_10
# %bb.11: # %._crit_edge.i95
# in Loop: Header=BB1_9 Depth=1
incl %r13d
addl %r12d, %ebp
cmpl %r12d, %r13d
jne .LBB1_9
.LBB1_12: # %_Z9GenMatrixIfEPT_jj.exit97
movq 40(%rsp), %rdi # 8-byte Reload
callq malloc
movq %rax, %rbp
testl %r12d, %r12d
je .LBB1_17
# %bb.13: # %.preheader.i100.preheader
movl %r12d, %eax
movq %rax, 16(%rsp) # 8-byte Spill
xorl %r13d, %r13d
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB1_14: # %.preheader.i100
# =>This Loop Header: Depth=1
# Child Loop BB1_15 Depth 2
movl %r13d, %ebx
movq 16(%rsp), %r15 # 8-byte Reload
.p2align 4, 0x90
.LBB1_15: # Parent Loop BB1_14 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movsd .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
divsd %xmm1, %xmm0
cvtsd2ss %xmm0, %xmm0
movl %ebx, %eax
movss %xmm0, (%rbp,%rax,4)
incl %ebx
decq %r15
jne .LBB1_15
# %bb.16: # %._crit_edge.i106
# in Loop: Header=BB1_14 Depth=1
incl %r14d
addl %r12d, %r13d
cmpl %r12d, %r14d
jne .LBB1_14
.LBB1_17: # %_Z9GenMatrixIfEPT_jj.exit108
movq 40(%rsp), %rdi # 8-byte Reload
callq malloc
movq %rax, %r13
testl %r12d, %r12d
je .LBB1_22
# %bb.18: # %.preheader.i111.preheader
movl %r12d, %eax
movq %rax, 16(%rsp) # 8-byte Spill
xorl %r15d, %r15d
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_19: # %.preheader.i111
# =>This Loop Header: Depth=1
# Child Loop BB1_20 Depth 2
movl %r15d, %r14d
movq 16(%rsp), %r12 # 8-byte Reload
.p2align 4, 0x90
.LBB1_20: # Parent Loop BB1_19 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movsd .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
divsd %xmm1, %xmm0
cvtsd2ss %xmm0, %xmm0
movl %r14d, %eax
movss %xmm0, (%r13,%rax,4)
incl %r14d
decq %r12
jne .LBB1_20
# %bb.21: # %._crit_edge.i117
# in Loop: Header=BB1_19 Depth=1
incl %ebx
movq 8(%rsp), %r12 # 8-byte Reload
addl %r12d, %r15d
cmpl %r12d, %ebx
jne .LBB1_19
.LBB1_22: # %_Z9GenMatrixIfEPT_jj.exit119
movl %r12d, %r14d
imull %r14d, %r14d
shll $2, %r14d
movl $SMd, %edi
movq %r14, %rsi
callq hipMalloc
movq SMd(%rip), %rdi
movq 56(%rsp), %rsi # 8-byte Reload
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movl $SNd, %edi
movq %r14, %rsi
callq hipMalloc
movq SNd(%rip), %rdi
movq 24(%rsp), %r15 # 8-byte Reload
movq %r15, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movl $SPd, %ebx
movl $SPd, %edi
jmp .LBB1_44
.LBB1_23:
testl %r12d, %r12d
je .LBB1_28
# %bb.24: # %.preheader.i122.preheader
movl %r12d, %r14d
xorl %ebp, %ebp
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_25: # %.preheader.i122
# =>This Loop Header: Depth=1
# Child Loop BB1_26 Depth 2
movl %ebp, %r15d
movq %r14, %r12
.p2align 4, 0x90
.LBB1_26: # Parent Loop BB1_25 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movsd .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
divsd %xmm1, %xmm0
movl %r15d, %eax
movsd %xmm0, (%r13,%rax,8)
incl %r15d
decq %r12
jne .LBB1_26
# %bb.27: # %._crit_edge.i128
# in Loop: Header=BB1_25 Depth=1
incl %ebx
movq 8(%rsp), %r12 # 8-byte Reload
addl %r12d, %ebp
cmpl %r12d, %ebx
jne .LBB1_25
.LBB1_28: # %_Z9GenMatrixIdEPT_jj.exit
movq 40(%rsp), %rdi # 8-byte Reload
callq malloc
movq %rax, 24(%rsp) # 8-byte Spill
testl %r12d, %r12d
je .LBB1_33
# %bb.29: # %.preheader.i132.preheader
movl %r12d, %eax
movq %rax, 16(%rsp) # 8-byte Spill
xorl %ebp, %ebp
xorl %ebx, %ebx
movq 24(%rsp), %r14 # 8-byte Reload
.p2align 4, 0x90
.LBB1_30: # %.preheader.i132
# =>This Loop Header: Depth=1
# Child Loop BB1_31 Depth 2
movl %ebp, %r15d
movq 16(%rsp), %r12 # 8-byte Reload
.p2align 4, 0x90
.LBB1_31: # Parent Loop BB1_30 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movsd .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
divsd %xmm1, %xmm0
movl %r15d, %eax
movsd %xmm0, (%r14,%rax,8)
incl %r15d
decq %r12
jne .LBB1_31
# %bb.32: # %._crit_edge.i138
# in Loop: Header=BB1_30 Depth=1
incl %ebx
movq 8(%rsp), %r12 # 8-byte Reload
addl %r12d, %ebp
cmpl %r12d, %ebx
jne .LBB1_30
.LBB1_33: # %_Z9GenMatrixIdEPT_jj.exit140
movq 40(%rsp), %rdi # 8-byte Reload
callq malloc
movq %rax, %rbp
testl %r12d, %r12d
je .LBB1_38
# %bb.34: # %.preheader.i143.preheader
movl %r12d, %r14d
xorl %r13d, %r13d
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_35: # %.preheader.i143
# =>This Loop Header: Depth=1
# Child Loop BB1_36 Depth 2
movl %r13d, %r15d
movq %r14, %r12
.p2align 4, 0x90
.LBB1_36: # Parent Loop BB1_35 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movsd .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
divsd %xmm1, %xmm0
movl %r15d, %eax
movsd %xmm0, (%rbp,%rax,8)
incl %r15d
decq %r12
jne .LBB1_36
# %bb.37: # %._crit_edge.i149
# in Loop: Header=BB1_35 Depth=1
incl %ebx
movq 8(%rsp), %r12 # 8-byte Reload
addl %r12d, %r13d
cmpl %r12d, %ebx
jne .LBB1_35
.LBB1_38: # %_Z9GenMatrixIdEPT_jj.exit151
movq 40(%rsp), %rdi # 8-byte Reload
callq malloc
movq %rax, %r13
testl %r12d, %r12d
je .LBB1_43
# %bb.39: # %.preheader.i154.preheader
movl %r12d, %eax
movq %rax, 16(%rsp) # 8-byte Spill
xorl %r14d, %r14d
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_40: # %.preheader.i154
# =>This Loop Header: Depth=1
# Child Loop BB1_41 Depth 2
movl %r14d, %r15d
movq 16(%rsp), %r12 # 8-byte Reload
.p2align 4, 0x90
.LBB1_41: # Parent Loop BB1_40 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movsd .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
divsd %xmm1, %xmm0
movl %r15d, %eax
movsd %xmm0, (%r13,%rax,8)
incl %r15d
decq %r12
jne .LBB1_41
# %bb.42: # %._crit_edge.i160
# in Loop: Header=BB1_40 Depth=1
incl %ebx
movq 8(%rsp), %r12 # 8-byte Reload
addl %r12d, %r14d
cmpl %r12d, %ebx
jne .LBB1_40
.LBB1_43: # %_Z9GenMatrixIdEPT_jj.exit162
movl %r12d, %r14d
imull %r14d, %r14d
shll $3, %r14d
movl $DMd, %edi
movq %r14, %rsi
callq hipMalloc
movq DMd(%rip), %rdi
movq 56(%rsp), %rsi # 8-byte Reload
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movl $DNd, %edi
movq %r14, %rsi
callq hipMalloc
movq DNd(%rip), %rdi
movq 24(%rsp), %r15 # 8-byte Reload
movq %r15, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movl $DPd, %ebx
movl $DPd, %edi
.LBB1_44:
movq %r14, %rsi
callq hipMalloc
movq (%rbx), %rdi
xorl %esi, %esi
movq %r14, %rdx
callq hipMemset
leaq 64(%rsp), %rdi
callq hipEventCreate
leaq 48(%rsp), %rdi
callq hipEventCreate
movq 64(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 184(%rsp), %rdi # 8-byte Reload
movl $1, %esi
movq 192(%rsp), %rdx # 8-byte Reload
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
movq 200(%rsp), %rbx # 8-byte Reload
testl %ebx, %ebx
je .LBB1_47
# %bb.45:
movl $SPd, %ecx
movq %rcx, 24(%rsp) # 8-byte Spill
movl $2, %r14d
movl $.L.str.7, %ecx
movq %rcx, 8(%rsp) # 8-byte Spill
testl %eax, %eax
jne .LBB1_50
# %bb.46:
movq SPd(%rip), %rax
movq SMd(%rip), %rcx
movq SNd(%rip), %rdx
movq %rax, 136(%rsp)
movq %rcx, 128(%rsp)
movq %rdx, 120(%rsp)
movl %r12d, 36(%rsp)
leaq 136(%rsp), %rax
movq %rax, 144(%rsp)
leaq 128(%rsp), %rax
movq %rax, 152(%rsp)
leaq 120(%rsp), %rax
movq %rax, 160(%rsp)
leaq 36(%rsp), %rax
movq %rax, 168(%rsp)
leaq 104(%rsp), %rdi
leaq 88(%rsp), %rsi
leaq 80(%rsp), %rdx
leaq 72(%rsp), %rcx
callq __hipPopCallConfiguration
movq 104(%rsp), %rsi
movl 112(%rsp), %edx
movq 88(%rsp), %rcx
movl 96(%rsp), %r8d
leaq 144(%rsp), %r9
movl $_Z15MatrixAddKernelIfEvPT_PKS0_S3_j, %edi
jmp .LBB1_49
.LBB1_47:
movl $DPd, %ecx
movq %rcx, 24(%rsp) # 8-byte Spill
movl $3, %r14d
movl $.L.str.8, %ecx
movq %rcx, 8(%rsp) # 8-byte Spill
testl %eax, %eax
jne .LBB1_50
# %bb.48:
movq DPd(%rip), %rax
movq DMd(%rip), %rcx
movq DNd(%rip), %rdx
movq %rax, 136(%rsp)
movq %rcx, 128(%rsp)
movq %rdx, 120(%rsp)
movl %r12d, 36(%rsp)
leaq 136(%rsp), %rax
movq %rax, 144(%rsp)
leaq 128(%rsp), %rax
movq %rax, 152(%rsp)
leaq 120(%rsp), %rax
movq %rax, 160(%rsp)
leaq 36(%rsp), %rax
movq %rax, 168(%rsp)
leaq 104(%rsp), %rdi
leaq 88(%rsp), %rsi
leaq 80(%rsp), %rdx
leaq 72(%rsp), %rcx
callq __hipPopCallConfiguration
movq 104(%rsp), %rsi
movl 112(%rsp), %edx
movq 88(%rsp), %rcx
movl 96(%rsp), %r8d
leaq 144(%rsp), %r9
movl $_Z15MatrixAddKernelIdEvPT_PKS0_S3_j, %edi
.LBB1_49:
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_50:
movq 48(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 48(%rsp), %rdi
callq hipEventSynchronize
movq 64(%rsp), %rsi
movq 48(%rsp), %rdx
leaq 144(%rsp), %rdi
callq hipEventElapsedTime
movq 64(%rsp), %rdi
callq hipEventDestroy
movq 48(%rsp), %rdi
callq hipEventDestroy
movl 208(%rsp), %edx # 4-byte Reload
movq 24(%rsp), %rax # 8-byte Reload
movq (%rax), %rsi
movl %r14d, %ecx
shlq %cl, %rdx
movq %rbp, %rdi
movl $2, %ecx
callq hipMemcpy
movss 144(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.6, %edi
movq 8(%rsp), %rsi # 8-byte Reload
movl %r12d, %edx
movl %r12d, %ecx
movq 216(%rsp), %r8 # 8-byte Reload
# kill: def $r8d killed $r8d killed $r8
movq 224(%rsp), %r9 # 8-byte Reload
# kill: def $r9d killed $r9d killed $r9
movb $1, %al
callq printf
testl %ebx, %ebx
je .LBB1_52
# %bb.51:
movl $SPd, %r12d
movl $SNd, %r14d
movl $SMd, %ebx
jmp .LBB1_53
.LBB1_52:
movl $DPd, %r12d
movl $DNd, %r14d
movl $DMd, %ebx
.LBB1_53:
movq 56(%rsp), %rdi # 8-byte Reload
callq free
movq %r15, %rdi
callq free
movq %rbp, %rdi
callq free
movq %r13, %rdi
callq free
movq (%rbx), %rdi
callq hipFree
movq (%r14), %rdi
callq hipFree
movq (%r12), %rdi
callq hipFree
xorl %eax, %eax
addq $232, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_54:
.cfi_def_cfa_offset 288
movl $.Lstr, %edi
callq puts@PLT
movq (%r14), %rsi
movl $.L.str.1, %edi
movl $.L.str.2, %edx
movl $.L.str.3, %ecx
movl $.L.str.4, %r8d
movl $.L.str.5, %r9d
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.section .text._Z30__device_stub__MatrixAddKernelIfEvPT_PKS0_S3_j,"axG",@progbits,_Z30__device_stub__MatrixAddKernelIfEvPT_PKS0_S3_j,comdat
.weak _Z30__device_stub__MatrixAddKernelIfEvPT_PKS0_S3_j # -- Begin function _Z30__device_stub__MatrixAddKernelIfEvPT_PKS0_S3_j
.p2align 4, 0x90
.type _Z30__device_stub__MatrixAddKernelIfEvPT_PKS0_S3_j,@function
_Z30__device_stub__MatrixAddKernelIfEvPT_PKS0_S3_j: # @_Z30__device_stub__MatrixAddKernelIfEvPT_PKS0_S3_j
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z15MatrixAddKernelIfEvPT_PKS0_S3_j, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size _Z30__device_stub__MatrixAddKernelIfEvPT_PKS0_S3_j, .Lfunc_end2-_Z30__device_stub__MatrixAddKernelIfEvPT_PKS0_S3_j
.cfi_endproc
# -- End function
.section .text._Z30__device_stub__MatrixAddKernelIdEvPT_PKS0_S3_j,"axG",@progbits,_Z30__device_stub__MatrixAddKernelIdEvPT_PKS0_S3_j,comdat
.weak _Z30__device_stub__MatrixAddKernelIdEvPT_PKS0_S3_j # -- Begin function _Z30__device_stub__MatrixAddKernelIdEvPT_PKS0_S3_j
.p2align 4, 0x90
.type _Z30__device_stub__MatrixAddKernelIdEvPT_PKS0_S3_j,@function
_Z30__device_stub__MatrixAddKernelIdEvPT_PKS0_S3_j: # @_Z30__device_stub__MatrixAddKernelIdEvPT_PKS0_S3_j
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z15MatrixAddKernelIdEvPT_PKS0_S3_j, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end3:
.size _Z30__device_stub__MatrixAddKernelIdEvPT_PKS0_S3_j, .Lfunc_end3-_Z30__device_stub__MatrixAddKernelIdEvPT_PKS0_S3_j
.cfi_endproc
# -- End function
.text
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z15MatrixAddKernelIfEvPT_PKS0_S3_j, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z15MatrixAddKernelIdEvPT_PKS0_S3_j, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type SMd,@object # @SMd
.bss
.globl SMd
.p2align 3, 0x0
SMd:
.quad 0
.size SMd, 8
.type SNd,@object # @SNd
.globl SNd
.p2align 3, 0x0
SNd:
.quad 0
.size SNd, 8
.type SPd,@object # @SPd
.globl SPd
.p2align 3, 0x0
SPd:
.quad 0
.size SPd, 8
.type DMd,@object # @DMd
.globl DMd
.p2align 3, 0x0
DMd:
.quad 0
.size DMd, 8
.type DNd,@object # @DNd
.globl DNd
.p2align 3, 0x0
DNd:
.quad 0
.size DNd, 8
.type DPd,@object # @DPd
.globl DPd
.p2align 3, 0x0
DPd:
.quad 0
.size DPd, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz " Usage: %s %s %s %s %s"
.size .L.str.1, 23
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "[matrix_size]"
.size .L.str.2, 14
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "[single|double]"
.size .L.str.3, 16
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "[divide_val]"
.size .L.str.4, 13
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "[num_threads]\n"
.size .L.str.5, 15
.type _Z15MatrixAddKernelIfEvPT_PKS0_S3_j,@object # @_Z15MatrixAddKernelIfEvPT_PKS0_S3_j
.section .rodata._Z15MatrixAddKernelIfEvPT_PKS0_S3_j,"aG",@progbits,_Z15MatrixAddKernelIfEvPT_PKS0_S3_j,comdat
.weak _Z15MatrixAddKernelIfEvPT_PKS0_S3_j
.p2align 3, 0x0
_Z15MatrixAddKernelIfEvPT_PKS0_S3_j:
.quad _Z30__device_stub__MatrixAddKernelIfEvPT_PKS0_S3_j
.size _Z15MatrixAddKernelIfEvPT_PKS0_S3_j, 8
.type _Z15MatrixAddKernelIdEvPT_PKS0_S3_j,@object # @_Z15MatrixAddKernelIdEvPT_PKS0_S3_j
.section .rodata._Z15MatrixAddKernelIdEvPT_PKS0_S3_j,"aG",@progbits,_Z15MatrixAddKernelIdEvPT_PKS0_S3_j,comdat
.weak _Z15MatrixAddKernelIdEvPT_PKS0_S3_j
.p2align 3, 0x0
_Z15MatrixAddKernelIdEvPT_PKS0_S3_j:
.quad _Z30__device_stub__MatrixAddKernelIdEvPT_PKS0_S3_j
.size _Z15MatrixAddKernelIdEvPT_PKS0_S3_j, 8
.type .L.str.6,@object # @.str.6
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.6:
.asciz "[ %s ][ %4dx%4d ][ %10d blocks ][ %5d threads ]\t>\t[ %7.3f (ms) ]\n"
.size .L.str.6, 66
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "Single Precision"
.size .L.str.7, 17
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "Double Precision"
.size .L.str.8, 17
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z15MatrixAddKernelIfEvPT_PKS0_S3_j"
.size .L__unnamed_1, 36
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z15MatrixAddKernelIdEvPT_PKS0_S3_j"
.size .L__unnamed_2, 36
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Wrong parameters. Please use the following format for running."
.size .Lstr, 63
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z30__device_stub__MatrixAddKernelIfEvPT_PKS0_S3_j
.addrsig_sym _Z30__device_stub__MatrixAddKernelIdEvPT_PKS0_S3_j
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym SMd
.addrsig_sym SNd
.addrsig_sym SPd
.addrsig_sym DMd
.addrsig_sym DNd
.addrsig_sym DPd
.addrsig_sym _Z15MatrixAddKernelIfEvPT_PKS0_S3_j
.addrsig_sym _Z15MatrixAddKernelIdEvPT_PKS0_S3_j
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
using namespace std;
__global__ void sumCols(int* d_matrix, int* d_result, int numRows, int numCols) {
int sum = 0;
int idx = blockDim.x * blockIdx.x + threadIdx.x;
for (int i = 0; i < numRows; i++) {
sum += d_matrix[idx + (numCols * i)];
}
d_result[idx] = sum;
} | code for sm_80
Function : _Z7sumColsPiS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff027624 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0040*/ HFMA2.MMA R6, -RZ, RZ, 0, 0 ; /* 0x00000000ff067435 */
/* 0x000fe200000001ff */
/*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0060*/ ISETP.GE.AND P0, PT, R2, 0x1, PT ; /* 0x000000010200780c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fd800078e0203 */
/*0080*/ @!P0 BRA 0x790 ; /* 0x0000070000008947 */
/* 0x000fea0003800000 */
/*0090*/ IADD3 R3, R2.reuse, -0x1, RZ ; /* 0xffffffff02037810 */
/* 0x040fe40007ffe0ff */
/*00a0*/ LOP3.LUT R2, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302027812 */
/* 0x000fe400078ec0ff */
/*00b0*/ ISETP.GE.U32.AND P0, PT, R3, 0x3, PT ; /* 0x000000030300780c */
/* 0x000fe20003f06070 */
/*00c0*/ IMAD.MOV.U32 R3, RZ, RZ, RZ ; /* 0x000000ffff037224 */
/* 0x000fe200078e00ff */
/*00d0*/ MOV R6, RZ ; /* 0x000000ff00067202 */
/* 0x000fd60000000f00 */
/*00e0*/ @!P0 BRA 0x6e0 ; /* 0x000005f000008947 */
/* 0x000fea0003800000 */
/*00f0*/ IADD3 R4, -R2, c[0x0][0x170], RZ ; /* 0x00005c0002047a10 */
/* 0x000fe20007ffe1ff */
/*0100*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */
/* 0x000fe200078e00ff */
/*0110*/ HFMA2.MMA R3, -RZ, RZ, 0, 0 ; /* 0x00000000ff037435 */
/* 0x000fe400000001ff */
/*0120*/ ISETP.GT.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fe20003f04270 */
/*0130*/ IMAD.WIDE R20, R0, R5, c[0x0][0x160] ; /* 0x0000580000147625 */
/* 0x000fd800078e0205 */
/*0140*/ @!P0 BRA 0x600 ; /* 0x000004b000008947 */
/* 0x000fea0003800000 */
/*0150*/ ISETP.GT.AND P1, PT, R4, 0xc, PT ; /* 0x0000000c0400780c */
/* 0x000fe40003f24270 */
/*0160*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*0170*/ @!P1 BRA 0x450 ; /* 0x000002d000009947 */
/* 0x000fea0003800000 */
/*0180*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0190*/ IMAD.WIDE R26, R5.reuse, c[0x0][0x174], R20 ; /* 0x00005d00051a7a25 */
/* 0x040fe200078e0214 */
/*01a0*/ LDG.E R7, [R20.64] ; /* 0x0000000414077981 */
/* 0x0000aa000c1e1900 */
/*01b0*/ IMAD.WIDE R22, R5.reuse, c[0x0][0x174], R26 ; /* 0x00005d0005167a25 */
/* 0x040fe400078e021a */
/*01c0*/ LDG.E R26, [R26.64] ; /* 0x000000041a1a7981 */
/* 0x0002a8000c1e1900 */
/*01d0*/ IMAD.WIDE R14, R5, c[0x0][0x174], R22 ; /* 0x00005d00050e7a25 */
/* 0x000fcc00078e0216 */
/*01e0*/ IMAD.WIDE R16, R5.reuse, c[0x0][0x174], R14 ; /* 0x00005d0005107a25 */
/* 0x040fe200078e020e */
/*01f0*/ LDG.E R27, [R22.64] ; /* 0x00000004161b7981 */
/* 0x0022e8000c1e1900 */
/*0200*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x0008e2000c1e1900 */
/*0210*/ IMAD.WIDE R8, R5, c[0x0][0x174], R16 ; /* 0x00005d0005087a25 */
/* 0x000fc600078e0210 */
/*0220*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000ae6000c1e1900 */
/*0230*/ IMAD.WIDE R12, R5.reuse, c[0x0][0x174], R8 ; /* 0x00005d00050c7a25 */
/* 0x040fe200078e0208 */
/*0240*/ LDG.E R15, [R8.64] ; /* 0x00000004080f7981 */
/* 0x0108ea000c1e1900 */
/*0250*/ IMAD.WIDE R10, R5.reuse, c[0x0][0x174], R12 ; /* 0x00005d00050a7a25 */
/* 0x040fe400078e020c */
/*0260*/ LDG.E R12, [R12.64] ; /* 0x000000040c0c7981 */
/* 0x0000e8000c1e1900 */
/*0270*/ IMAD.WIDE R18, R5, c[0x0][0x174], R10 ; /* 0x00005d0005127a25 */
/* 0x000fcc00078e020a */
/*0280*/ IMAD.WIDE R20, R5.reuse, c[0x0][0x174], R18 ; /* 0x00005d0005147a25 */
/* 0x041fe200078e0212 */
/*0290*/ LDG.E R13, [R10.64] ; /* 0x000000040a0d7981 */
/* 0x0000e8000c1e1900 */
/*02a0*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x000ee2000c1e1900 */
/*02b0*/ IMAD.WIDE R22, R5, c[0x0][0x174], R20 ; /* 0x00005d0005167a25 */
/* 0x002fc600078e0214 */
/*02c0*/ LDG.E R21, [R20.64] ; /* 0x0000000414157981 */
/* 0x000ee6000c1e1900 */
/*02d0*/ IMAD.WIDE R24, R5.reuse, c[0x0][0x174], R22 ; /* 0x00005d0005187a25 */
/* 0x040fe400078e0216 */
/*02e0*/ LDG.E R22, [R22.64] ; /* 0x0000000416167981 */
/* 0x000ee8000c1e1900 */
/*02f0*/ IMAD.WIDE R28, R5.reuse, c[0x0][0x174], R24 ; /* 0x00005d00051c7a25 */
/* 0x040fe400078e0218 */
/*0300*/ LDG.E R25, [R24.64] ; /* 0x0000000418197981 */
/* 0x000ee8000c1e1900 */
/*0310*/ IMAD.WIDE R8, R5, c[0x0][0x174], R28 ; /* 0x00005d0005087a25 */
/* 0x010fc400078e021c */
/*0320*/ LDG.E R28, [R28.64] ; /* 0x000000041c1c7981 */
/* 0x000f28000c1e1900 */
/*0330*/ IMAD.WIDE R10, R5.reuse, c[0x0][0x174], R8 ; /* 0x00005d00050a7a25 */
/* 0x041fe200078e0208 */
/*0340*/ LDG.E R17, [R8.64] ; /* 0x0000000408117981 */
/* 0x02012a000c1e1900 */
/*0350*/ IMAD.WIDE R8, R5, c[0x0][0x174], R10 ; /* 0x00005d0005087a25 */
/* 0x001fc400078e020a */
/*0360*/ LDG.E R11, [R10.64] ; /* 0x000000040a0b7981 */
/* 0x000168000c1e1900 */
/*0370*/ LDG.E R10, [R8.64] ; /* 0x00000004080a7981 */
/* 0x001f62000c1e1900 */
/*0380*/ IADD3 R4, R4, -0x10, RZ ; /* 0xfffffff004047810 */
/* 0x000fc80007ffe0ff */
/*0390*/ ISETP.GT.AND P1, PT, R4, 0xc, PT ; /* 0x0000000c0400780c */
/* 0x000fe40003f24270 */
/*03a0*/ IADD3 R3, R3, 0x10, RZ ; /* 0x0000001003037810 */
/* 0x000fe40007ffe0ff */
/*03b0*/ IADD3 R7, R26, R7, R6 ; /* 0x000000071a077210 */
/* 0x004fc80007ffe006 */
/*03c0*/ IADD3 R7, R14, R27, R7 ; /* 0x0000001b0e077210 */
/* 0x008fc80007ffe007 */
/*03d0*/ IADD3 R7, R15, R16, R7 ; /* 0x000000100f077210 */
/* 0x000fc80007ffe007 */
/*03e0*/ IADD3 R7, R13, R12, R7 ; /* 0x0000000c0d077210 */
/* 0x000fc80007ffe007 */
/*03f0*/ IADD3 R7, R21, R18, R7 ; /* 0x0000001215077210 */
/* 0x000fe20007ffe007 */
/*0400*/ IMAD.WIDE R20, R5, c[0x0][0x174], R8 ; /* 0x00005d0005147a25 */
/* 0x000fc600078e0208 */
/*0410*/ IADD3 R7, R25, R22, R7 ; /* 0x0000001619077210 */
/* 0x000fc80007ffe007 */
/*0420*/ IADD3 R7, R17, R28, R7 ; /* 0x0000001c11077210 */
/* 0x010fc80007ffe007 */
/*0430*/ IADD3 R6, R10, R11, R7 ; /* 0x0000000b0a067210 */
/* 0x020fe20007ffe007 */
/*0440*/ @P1 BRA 0x190 ; /* 0xfffffd4000001947 */
/* 0x000fea000383ffff */
/*0450*/ ISETP.GT.AND P1, PT, R4, 0x4, PT ; /* 0x000000040400780c */
/* 0x000fda0003f24270 */
/*0460*/ @!P1 BRA 0x5e0 ; /* 0x0000017000009947 */
/* 0x000fea0003800000 */
/*0470*/ IMAD.WIDE R22, R5.reuse, c[0x0][0x174], R20 ; /* 0x00005d0005167a25 */
/* 0x040fe400078e0214 */
/*0480*/ LDG.E R21, [R20.64] ; /* 0x0000000414157981 */
/* 0x000ea8000c1e1900 */
/*0490*/ IMAD.WIDE R8, R5.reuse, c[0x0][0x174], R22 ; /* 0x00005d0005087a25 */
/* 0x040fe400078e0216 */
/*04a0*/ LDG.E R22, [R22.64] ; /* 0x0000000416167981 */
/* 0x000ea8000c1e1900 */
/*04b0*/ IMAD.WIDE R14, R5, c[0x0][0x174], R8 ; /* 0x00005d00050e7a25 */
/* 0x000fc400078e0208 */
/*04c0*/ LDG.E R9, [R8.64] ; /* 0x0000000408097981 */
/* 0x000ee8000c1e1900 */
/*04d0*/ IMAD.WIDE R12, R5.reuse, c[0x0][0x174], R14 ; /* 0x00005d00050c7a25 */
/* 0x040fe400078e020e */
/*04e0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000ee8000c1e1900 */
/*04f0*/ IMAD.WIDE R10, R5, c[0x0][0x174], R12 ; /* 0x00005d00050a7a25 */
/* 0x000fc400078e020c */
/*0500*/ LDG.E R13, [R12.64] ; /* 0x000000040c0d7981 */
/* 0x000f28000c1e1900 */
/*0510*/ IMAD.WIDE R16, R5.reuse, c[0x0][0x174], R10 ; /* 0x00005d0005107a25 */
/* 0x040fe400078e020a */
/*0520*/ LDG.E R10, [R10.64] ; /* 0x000000040a0a7981 */
/* 0x000f28000c1e1900 */
/*0530*/ IMAD.WIDE R18, R5, c[0x0][0x174], R16 ; /* 0x00005d0005127a25 */
/* 0x000fc400078e0210 */
/*0540*/ LDG.E R17, [R16.64] ; /* 0x0000000410117981 */
/* 0x000f68000c1e1900 */
/*0550*/ LDG.E R7, [R18.64] ; /* 0x0000000412077981 */
/* 0x000f62000c1e1900 */
/*0560*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0570*/ IADD3 R3, R3, 0x8, RZ ; /* 0x0000000803037810 */
/* 0x000fe40007ffe0ff */
/*0580*/ IADD3 R4, R4, -0x8, RZ ; /* 0xfffffff804047810 */
/* 0x000fe40007ffe0ff */
/*0590*/ IADD3 R6, R22, R21, R6 ; /* 0x0000001516067210 */
/* 0x004fe20007ffe006 */
/*05a0*/ IMAD.WIDE R20, R5, c[0x0][0x174], R18 ; /* 0x00005d0005147a25 */
/* 0x000fc600078e0212 */
/*05b0*/ IADD3 R6, R14, R9, R6 ; /* 0x000000090e067210 */
/* 0x008fc80007ffe006 */
/*05c0*/ IADD3 R6, R10, R13, R6 ; /* 0x0000000d0a067210 */
/* 0x010fc80007ffe006 */
/*05d0*/ IADD3 R6, R7, R17, R6 ; /* 0x0000001107067210 */
/* 0x020fe40007ffe006 */
/*05e0*/ ISETP.NE.OR P0, PT, R4, RZ, P0 ; /* 0x000000ff0400720c */
/* 0x000fda0000705670 */
/*05f0*/ @!P0 BRA 0x6e0 ; /* 0x000000e000008947 */
/* 0x000fea0003800000 */
/*0600*/ IMAD.WIDE R8, R5.reuse, c[0x0][0x174], R20 ; /* 0x00005d0005087a25 */
/* 0x040fe400078e0214 */
/*0610*/ LDG.E R21, [R20.64] ; /* 0x0000000414157981 */
/* 0x000ea8000c1e1900 */
/*0620*/ IMAD.WIDE R10, R5.reuse, c[0x0][0x174], R8 ; /* 0x00005d00050a7a25 */
/* 0x040fe400078e0208 */
/*0630*/ LDG.E R8, [R8.64] ; /* 0x0000000408087981 */
/* 0x000ea8000c1e1900 */
/*0640*/ IMAD.WIDE R12, R5, c[0x0][0x174], R10 ; /* 0x00005d00050c7a25 */
/* 0x000fc400078e020a */
/*0650*/ LDG.E R11, [R10.64] ; /* 0x000000040a0b7981 */
/* 0x000ee8000c1e1900 */
/*0660*/ LDG.E R7, [R12.64] ; /* 0x000000040c077981 */
/* 0x000ee2000c1e1900 */
/*0670*/ IADD3 R4, R4, -0x4, RZ ; /* 0xfffffffc04047810 */
/* 0x000fc80007ffe0ff */
/*0680*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fe40003f05270 */
/*0690*/ IADD3 R3, R3, 0x4, RZ ; /* 0x0000000403037810 */
/* 0x000fe40007ffe0ff */
/*06a0*/ IADD3 R6, R8, R21, R6 ; /* 0x0000001508067210 */
/* 0x004fe20007ffe006 */
/*06b0*/ IMAD.WIDE R20, R5, c[0x0][0x174], R12 ; /* 0x00005d0005147a25 */
/* 0x000fc600078e020c */
/*06c0*/ IADD3 R6, R7, R11, R6 ; /* 0x0000000b07067210 */
/* 0x008fca0007ffe006 */
/*06d0*/ @P0 BRA 0x600 ; /* 0xffffff2000000947 */
/* 0x000fea000383ffff */
/*06e0*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fda0003f05270 */
/*06f0*/ @!P0 BRA 0x790 ; /* 0x0000009000008947 */
/* 0x000fea0003800000 */
/*0700*/ IMAD.MOV.U32 R7, RZ, RZ, 0x4 ; /* 0x00000004ff077424 */
/* 0x000fe400078e00ff */
/*0710*/ IMAD R3, R3, c[0x0][0x174], R0 ; /* 0x00005d0003037a24 */
/* 0x000fc800078e0200 */
/*0720*/ IMAD.WIDE R4, R3, R7, c[0x0][0x160] ; /* 0x0000580003047625 */
/* 0x000fca00078e0207 */
/*0730*/ LDG.E R3, [R4.64] ; /* 0x0000000404037981 */
/* 0x0000a2000c1e1900 */
/*0740*/ IADD3 R2, R2, -0x1, RZ ; /* 0xffffffff02027810 */
/* 0x000fc80007ffe0ff */
/*0750*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fe20003f05270 */
/*0760*/ IMAD.WIDE R4, R7, c[0x0][0x174], R4 ; /* 0x00005d0007047a25 */
/* 0x001fe200078e0204 */
/*0770*/ IADD3 R6, R3, R6, RZ ; /* 0x0000000603067210 */
/* 0x004fd60007ffe0ff */
/*0780*/ @P0 BRA 0x730 ; /* 0xffffffa000000947 */
/* 0x000fea000383ffff */
/*0790*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fc800078e00ff */
/*07a0*/ IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fca00078e0203 */
/*07b0*/ STG.E [R2.64], R6 ; /* 0x0000000602007986 */
/* 0x000fe2000c101904 */
/*07c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*07d0*/ BRA 0x7d0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*07e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*07f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0800*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0810*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0820*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0830*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0840*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0850*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0860*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0870*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
using namespace std;
__global__ void sumCols(int* d_matrix, int* d_result, int numRows, int numCols) {
int sum = 0;
int idx = blockDim.x * blockIdx.x + threadIdx.x;
for (int i = 0; i < numRows; i++) {
sum += d_matrix[idx + (numCols * i)];
}
d_result[idx] = sum;
} | .file "tmpxft_001aa1d6_00000000-6_sumCols.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z30__device_stub__Z7sumColsPiS_iiPiS_ii
.type _Z30__device_stub__Z7sumColsPiS_iiPiS_ii, @function
_Z30__device_stub__Z7sumColsPiS_iiPiS_ii:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z7sumColsPiS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z30__device_stub__Z7sumColsPiS_iiPiS_ii, .-_Z30__device_stub__Z7sumColsPiS_iiPiS_ii
.globl _Z7sumColsPiS_ii
.type _Z7sumColsPiS_ii, @function
_Z7sumColsPiS_ii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z7sumColsPiS_iiPiS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z7sumColsPiS_ii, .-_Z7sumColsPiS_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z7sumColsPiS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z7sumColsPiS_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
using namespace std;
__global__ void sumCols(int* d_matrix, int* d_result, int numRows, int numCols) {
int sum = 0;
int idx = blockDim.x * blockIdx.x + threadIdx.x;
for (int i = 0; i < numRows; i++) {
sum += d_matrix[idx + (numCols * i)];
}
d_result[idx] = sum;
} | #include <hip/hip_runtime.h>
#include "includes.h"
using namespace std;
__global__ void sumCols(int* d_matrix, int* d_result, int numRows, int numCols) {
int sum = 0;
int idx = blockDim.x * blockIdx.x + threadIdx.x;
for (int i = 0; i < numRows; i++) {
sum += d_matrix[idx + (numCols * i)];
}
d_result[idx] = sum;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
using namespace std;
__global__ void sumCols(int* d_matrix, int* d_result, int numRows, int numCols) {
int sum = 0;
int idx = blockDim.x * blockIdx.x + threadIdx.x;
for (int i = 0; i < numRows; i++) {
sum += d_matrix[idx + (numCols * i)];
}
d_result[idx] = sum;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z7sumColsPiS_ii
.globl _Z7sumColsPiS_ii
.p2align 8
.type _Z7sumColsPiS_ii,@function
_Z7sumColsPiS_ii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s4, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_cmp_lt_i32 s4, 1
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_cbranch_scc1 .LBB0_3
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x0
s_load_b32 s5, s[0:1], 0x14
v_mov_b32_e32 v0, 0
s_delay_alu instid0(VALU_DEP_2)
v_mov_b32_e32 v2, v1
.LBB0_2:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_ashrrev_i32_e32 v3, 31, v2
s_add_i32 s4, s4, -1
s_cmp_eq_u32 s4, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[3:4], 2, v[2:3]
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v2, s5, v2
v_add_co_u32 v3, vcc_lo, s2, v3
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v4, vcc_lo
global_load_b32 v3, v[3:4], off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v0, v3, v0
s_cbranch_scc0 .LBB0_2
s_branch .LBB0_4
.LBB0_3:
v_mov_b32_e32 v0, 0
.LBB0_4:
s_load_b64 s[0:1], s[0:1], 0x8
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v2, 31, v1
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v1, vcc_lo, s0, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo
global_store_b32 v[1:2], v0, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z7sumColsPiS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z7sumColsPiS_ii, .Lfunc_end0-_Z7sumColsPiS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z7sumColsPiS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z7sumColsPiS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
using namespace std;
// Kernel: compute per-column sums of a row-major numRows x numCols int matrix.
// Each thread owns one column index and writes that column's total to d_result.
// NOTE(review): there is no `idx < numCols` guard, so the launch configuration
// must supply no more threads than numCols -- TODO confirm at the call site.
__global__ void sumCols(int* d_matrix, int* d_result, int numRows, int numCols) {
int sum = 0;
// Flat 1-D global thread index; interpreted as the column number.
int idx = blockDim.x * blockIdx.x + threadIdx.x;
// Walk down the column: element (i, idx) lives at flat offset idx + numCols*i.
for (int i = 0; i < numRows; i++) {
sum += d_matrix[idx + (numCols * i)];
}
d_result[idx] = sum;
} | .text
.file "sumCols.hip"
.globl _Z22__device_stub__sumColsPiS_ii # -- Begin function _Z22__device_stub__sumColsPiS_ii
.p2align 4, 0x90
.type _Z22__device_stub__sumColsPiS_ii,@function
_Z22__device_stub__sumColsPiS_ii: # @_Z22__device_stub__sumColsPiS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z7sumColsPiS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z22__device_stub__sumColsPiS_ii, .Lfunc_end0-_Z22__device_stub__sumColsPiS_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7sumColsPiS_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z7sumColsPiS_ii,@object # @_Z7sumColsPiS_ii
.section .rodata,"a",@progbits
.globl _Z7sumColsPiS_ii
.p2align 3, 0x0
_Z7sumColsPiS_ii:
.quad _Z22__device_stub__sumColsPiS_ii
.size _Z7sumColsPiS_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z7sumColsPiS_ii"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__sumColsPiS_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z7sumColsPiS_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z7sumColsPiS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff027624 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0040*/ HFMA2.MMA R6, -RZ, RZ, 0, 0 ; /* 0x00000000ff067435 */
/* 0x000fe200000001ff */
/*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0060*/ ISETP.GE.AND P0, PT, R2, 0x1, PT ; /* 0x000000010200780c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fd800078e0203 */
/*0080*/ @!P0 BRA 0x790 ; /* 0x0000070000008947 */
/* 0x000fea0003800000 */
/*0090*/ IADD3 R3, R2.reuse, -0x1, RZ ; /* 0xffffffff02037810 */
/* 0x040fe40007ffe0ff */
/*00a0*/ LOP3.LUT R2, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302027812 */
/* 0x000fe400078ec0ff */
/*00b0*/ ISETP.GE.U32.AND P0, PT, R3, 0x3, PT ; /* 0x000000030300780c */
/* 0x000fe20003f06070 */
/*00c0*/ IMAD.MOV.U32 R3, RZ, RZ, RZ ; /* 0x000000ffff037224 */
/* 0x000fe200078e00ff */
/*00d0*/ MOV R6, RZ ; /* 0x000000ff00067202 */
/* 0x000fd60000000f00 */
/*00e0*/ @!P0 BRA 0x6e0 ; /* 0x000005f000008947 */
/* 0x000fea0003800000 */
/*00f0*/ IADD3 R4, -R2, c[0x0][0x170], RZ ; /* 0x00005c0002047a10 */
/* 0x000fe20007ffe1ff */
/*0100*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */
/* 0x000fe200078e00ff */
/*0110*/ HFMA2.MMA R3, -RZ, RZ, 0, 0 ; /* 0x00000000ff037435 */
/* 0x000fe400000001ff */
/*0120*/ ISETP.GT.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fe20003f04270 */
/*0130*/ IMAD.WIDE R20, R0, R5, c[0x0][0x160] ; /* 0x0000580000147625 */
/* 0x000fd800078e0205 */
/*0140*/ @!P0 BRA 0x600 ; /* 0x000004b000008947 */
/* 0x000fea0003800000 */
/*0150*/ ISETP.GT.AND P1, PT, R4, 0xc, PT ; /* 0x0000000c0400780c */
/* 0x000fe40003f24270 */
/*0160*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*0170*/ @!P1 BRA 0x450 ; /* 0x000002d000009947 */
/* 0x000fea0003800000 */
/*0180*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0190*/ IMAD.WIDE R26, R5.reuse, c[0x0][0x174], R20 ; /* 0x00005d00051a7a25 */
/* 0x040fe200078e0214 */
/*01a0*/ LDG.E R7, [R20.64] ; /* 0x0000000414077981 */
/* 0x0000aa000c1e1900 */
/*01b0*/ IMAD.WIDE R22, R5.reuse, c[0x0][0x174], R26 ; /* 0x00005d0005167a25 */
/* 0x040fe400078e021a */
/*01c0*/ LDG.E R26, [R26.64] ; /* 0x000000041a1a7981 */
/* 0x0002a8000c1e1900 */
/*01d0*/ IMAD.WIDE R14, R5, c[0x0][0x174], R22 ; /* 0x00005d00050e7a25 */
/* 0x000fcc00078e0216 */
/*01e0*/ IMAD.WIDE R16, R5.reuse, c[0x0][0x174], R14 ; /* 0x00005d0005107a25 */
/* 0x040fe200078e020e */
/*01f0*/ LDG.E R27, [R22.64] ; /* 0x00000004161b7981 */
/* 0x0022e8000c1e1900 */
/*0200*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x0008e2000c1e1900 */
/*0210*/ IMAD.WIDE R8, R5, c[0x0][0x174], R16 ; /* 0x00005d0005087a25 */
/* 0x000fc600078e0210 */
/*0220*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000ae6000c1e1900 */
/*0230*/ IMAD.WIDE R12, R5.reuse, c[0x0][0x174], R8 ; /* 0x00005d00050c7a25 */
/* 0x040fe200078e0208 */
/*0240*/ LDG.E R15, [R8.64] ; /* 0x00000004080f7981 */
/* 0x0108ea000c1e1900 */
/*0250*/ IMAD.WIDE R10, R5.reuse, c[0x0][0x174], R12 ; /* 0x00005d00050a7a25 */
/* 0x040fe400078e020c */
/*0260*/ LDG.E R12, [R12.64] ; /* 0x000000040c0c7981 */
/* 0x0000e8000c1e1900 */
/*0270*/ IMAD.WIDE R18, R5, c[0x0][0x174], R10 ; /* 0x00005d0005127a25 */
/* 0x000fcc00078e020a */
/*0280*/ IMAD.WIDE R20, R5.reuse, c[0x0][0x174], R18 ; /* 0x00005d0005147a25 */
/* 0x041fe200078e0212 */
/*0290*/ LDG.E R13, [R10.64] ; /* 0x000000040a0d7981 */
/* 0x0000e8000c1e1900 */
/*02a0*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x000ee2000c1e1900 */
/*02b0*/ IMAD.WIDE R22, R5, c[0x0][0x174], R20 ; /* 0x00005d0005167a25 */
/* 0x002fc600078e0214 */
/*02c0*/ LDG.E R21, [R20.64] ; /* 0x0000000414157981 */
/* 0x000ee6000c1e1900 */
/*02d0*/ IMAD.WIDE R24, R5.reuse, c[0x0][0x174], R22 ; /* 0x00005d0005187a25 */
/* 0x040fe400078e0216 */
/*02e0*/ LDG.E R22, [R22.64] ; /* 0x0000000416167981 */
/* 0x000ee8000c1e1900 */
/*02f0*/ IMAD.WIDE R28, R5.reuse, c[0x0][0x174], R24 ; /* 0x00005d00051c7a25 */
/* 0x040fe400078e0218 */
/*0300*/ LDG.E R25, [R24.64] ; /* 0x0000000418197981 */
/* 0x000ee8000c1e1900 */
/*0310*/ IMAD.WIDE R8, R5, c[0x0][0x174], R28 ; /* 0x00005d0005087a25 */
/* 0x010fc400078e021c */
/*0320*/ LDG.E R28, [R28.64] ; /* 0x000000041c1c7981 */
/* 0x000f28000c1e1900 */
/*0330*/ IMAD.WIDE R10, R5.reuse, c[0x0][0x174], R8 ; /* 0x00005d00050a7a25 */
/* 0x041fe200078e0208 */
/*0340*/ LDG.E R17, [R8.64] ; /* 0x0000000408117981 */
/* 0x02012a000c1e1900 */
/*0350*/ IMAD.WIDE R8, R5, c[0x0][0x174], R10 ; /* 0x00005d0005087a25 */
/* 0x001fc400078e020a */
/*0360*/ LDG.E R11, [R10.64] ; /* 0x000000040a0b7981 */
/* 0x000168000c1e1900 */
/*0370*/ LDG.E R10, [R8.64] ; /* 0x00000004080a7981 */
/* 0x001f62000c1e1900 */
/*0380*/ IADD3 R4, R4, -0x10, RZ ; /* 0xfffffff004047810 */
/* 0x000fc80007ffe0ff */
/*0390*/ ISETP.GT.AND P1, PT, R4, 0xc, PT ; /* 0x0000000c0400780c */
/* 0x000fe40003f24270 */
/*03a0*/ IADD3 R3, R3, 0x10, RZ ; /* 0x0000001003037810 */
/* 0x000fe40007ffe0ff */
/*03b0*/ IADD3 R7, R26, R7, R6 ; /* 0x000000071a077210 */
/* 0x004fc80007ffe006 */
/*03c0*/ IADD3 R7, R14, R27, R7 ; /* 0x0000001b0e077210 */
/* 0x008fc80007ffe007 */
/*03d0*/ IADD3 R7, R15, R16, R7 ; /* 0x000000100f077210 */
/* 0x000fc80007ffe007 */
/*03e0*/ IADD3 R7, R13, R12, R7 ; /* 0x0000000c0d077210 */
/* 0x000fc80007ffe007 */
/*03f0*/ IADD3 R7, R21, R18, R7 ; /* 0x0000001215077210 */
/* 0x000fe20007ffe007 */
/*0400*/ IMAD.WIDE R20, R5, c[0x0][0x174], R8 ; /* 0x00005d0005147a25 */
/* 0x000fc600078e0208 */
/*0410*/ IADD3 R7, R25, R22, R7 ; /* 0x0000001619077210 */
/* 0x000fc80007ffe007 */
/*0420*/ IADD3 R7, R17, R28, R7 ; /* 0x0000001c11077210 */
/* 0x010fc80007ffe007 */
/*0430*/ IADD3 R6, R10, R11, R7 ; /* 0x0000000b0a067210 */
/* 0x020fe20007ffe007 */
/*0440*/ @P1 BRA 0x190 ; /* 0xfffffd4000001947 */
/* 0x000fea000383ffff */
/*0450*/ ISETP.GT.AND P1, PT, R4, 0x4, PT ; /* 0x000000040400780c */
/* 0x000fda0003f24270 */
/*0460*/ @!P1 BRA 0x5e0 ; /* 0x0000017000009947 */
/* 0x000fea0003800000 */
/*0470*/ IMAD.WIDE R22, R5.reuse, c[0x0][0x174], R20 ; /* 0x00005d0005167a25 */
/* 0x040fe400078e0214 */
/*0480*/ LDG.E R21, [R20.64] ; /* 0x0000000414157981 */
/* 0x000ea8000c1e1900 */
/*0490*/ IMAD.WIDE R8, R5.reuse, c[0x0][0x174], R22 ; /* 0x00005d0005087a25 */
/* 0x040fe400078e0216 */
/*04a0*/ LDG.E R22, [R22.64] ; /* 0x0000000416167981 */
/* 0x000ea8000c1e1900 */
/*04b0*/ IMAD.WIDE R14, R5, c[0x0][0x174], R8 ; /* 0x00005d00050e7a25 */
/* 0x000fc400078e0208 */
/*04c0*/ LDG.E R9, [R8.64] ; /* 0x0000000408097981 */
/* 0x000ee8000c1e1900 */
/*04d0*/ IMAD.WIDE R12, R5.reuse, c[0x0][0x174], R14 ; /* 0x00005d00050c7a25 */
/* 0x040fe400078e020e */
/*04e0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000ee8000c1e1900 */
/*04f0*/ IMAD.WIDE R10, R5, c[0x0][0x174], R12 ; /* 0x00005d00050a7a25 */
/* 0x000fc400078e020c */
/*0500*/ LDG.E R13, [R12.64] ; /* 0x000000040c0d7981 */
/* 0x000f28000c1e1900 */
/*0510*/ IMAD.WIDE R16, R5.reuse, c[0x0][0x174], R10 ; /* 0x00005d0005107a25 */
/* 0x040fe400078e020a */
/*0520*/ LDG.E R10, [R10.64] ; /* 0x000000040a0a7981 */
/* 0x000f28000c1e1900 */
/*0530*/ IMAD.WIDE R18, R5, c[0x0][0x174], R16 ; /* 0x00005d0005127a25 */
/* 0x000fc400078e0210 */
/*0540*/ LDG.E R17, [R16.64] ; /* 0x0000000410117981 */
/* 0x000f68000c1e1900 */
/*0550*/ LDG.E R7, [R18.64] ; /* 0x0000000412077981 */
/* 0x000f62000c1e1900 */
/*0560*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0570*/ IADD3 R3, R3, 0x8, RZ ; /* 0x0000000803037810 */
/* 0x000fe40007ffe0ff */
/*0580*/ IADD3 R4, R4, -0x8, RZ ; /* 0xfffffff804047810 */
/* 0x000fe40007ffe0ff */
/*0590*/ IADD3 R6, R22, R21, R6 ; /* 0x0000001516067210 */
/* 0x004fe20007ffe006 */
/*05a0*/ IMAD.WIDE R20, R5, c[0x0][0x174], R18 ; /* 0x00005d0005147a25 */
/* 0x000fc600078e0212 */
/*05b0*/ IADD3 R6, R14, R9, R6 ; /* 0x000000090e067210 */
/* 0x008fc80007ffe006 */
/*05c0*/ IADD3 R6, R10, R13, R6 ; /* 0x0000000d0a067210 */
/* 0x010fc80007ffe006 */
/*05d0*/ IADD3 R6, R7, R17, R6 ; /* 0x0000001107067210 */
/* 0x020fe40007ffe006 */
/*05e0*/ ISETP.NE.OR P0, PT, R4, RZ, P0 ; /* 0x000000ff0400720c */
/* 0x000fda0000705670 */
/*05f0*/ @!P0 BRA 0x6e0 ; /* 0x000000e000008947 */
/* 0x000fea0003800000 */
/*0600*/ IMAD.WIDE R8, R5.reuse, c[0x0][0x174], R20 ; /* 0x00005d0005087a25 */
/* 0x040fe400078e0214 */
/*0610*/ LDG.E R21, [R20.64] ; /* 0x0000000414157981 */
/* 0x000ea8000c1e1900 */
/*0620*/ IMAD.WIDE R10, R5.reuse, c[0x0][0x174], R8 ; /* 0x00005d00050a7a25 */
/* 0x040fe400078e0208 */
/*0630*/ LDG.E R8, [R8.64] ; /* 0x0000000408087981 */
/* 0x000ea8000c1e1900 */
/*0640*/ IMAD.WIDE R12, R5, c[0x0][0x174], R10 ; /* 0x00005d00050c7a25 */
/* 0x000fc400078e020a */
/*0650*/ LDG.E R11, [R10.64] ; /* 0x000000040a0b7981 */
/* 0x000ee8000c1e1900 */
/*0660*/ LDG.E R7, [R12.64] ; /* 0x000000040c077981 */
/* 0x000ee2000c1e1900 */
/*0670*/ IADD3 R4, R4, -0x4, RZ ; /* 0xfffffffc04047810 */
/* 0x000fc80007ffe0ff */
/*0680*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fe40003f05270 */
/*0690*/ IADD3 R3, R3, 0x4, RZ ; /* 0x0000000403037810 */
/* 0x000fe40007ffe0ff */
/*06a0*/ IADD3 R6, R8, R21, R6 ; /* 0x0000001508067210 */
/* 0x004fe20007ffe006 */
/*06b0*/ IMAD.WIDE R20, R5, c[0x0][0x174], R12 ; /* 0x00005d0005147a25 */
/* 0x000fc600078e020c */
/*06c0*/ IADD3 R6, R7, R11, R6 ; /* 0x0000000b07067210 */
/* 0x008fca0007ffe006 */
/*06d0*/ @P0 BRA 0x600 ; /* 0xffffff2000000947 */
/* 0x000fea000383ffff */
/*06e0*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fda0003f05270 */
/*06f0*/ @!P0 BRA 0x790 ; /* 0x0000009000008947 */
/* 0x000fea0003800000 */
/*0700*/ IMAD.MOV.U32 R7, RZ, RZ, 0x4 ; /* 0x00000004ff077424 */
/* 0x000fe400078e00ff */
/*0710*/ IMAD R3, R3, c[0x0][0x174], R0 ; /* 0x00005d0003037a24 */
/* 0x000fc800078e0200 */
/*0720*/ IMAD.WIDE R4, R3, R7, c[0x0][0x160] ; /* 0x0000580003047625 */
/* 0x000fca00078e0207 */
/*0730*/ LDG.E R3, [R4.64] ; /* 0x0000000404037981 */
/* 0x0000a2000c1e1900 */
/*0740*/ IADD3 R2, R2, -0x1, RZ ; /* 0xffffffff02027810 */
/* 0x000fc80007ffe0ff */
/*0750*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fe20003f05270 */
/*0760*/ IMAD.WIDE R4, R7, c[0x0][0x174], R4 ; /* 0x00005d0007047a25 */
/* 0x001fe200078e0204 */
/*0770*/ IADD3 R6, R3, R6, RZ ; /* 0x0000000603067210 */
/* 0x004fd60007ffe0ff */
/*0780*/ @P0 BRA 0x730 ; /* 0xffffffa000000947 */
/* 0x000fea000383ffff */
/*0790*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fc800078e00ff */
/*07a0*/ IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fca00078e0203 */
/*07b0*/ STG.E [R2.64], R6 ; /* 0x0000000602007986 */
/* 0x000fe2000c101904 */
/*07c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*07d0*/ BRA 0x7d0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*07e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*07f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0800*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0810*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0820*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0830*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0840*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0850*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0860*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0870*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z7sumColsPiS_ii
.globl _Z7sumColsPiS_ii
.p2align 8
.type _Z7sumColsPiS_ii,@function
_Z7sumColsPiS_ii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s4, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_cmp_lt_i32 s4, 1
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_cbranch_scc1 .LBB0_3
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x0
s_load_b32 s5, s[0:1], 0x14
v_mov_b32_e32 v0, 0
s_delay_alu instid0(VALU_DEP_2)
v_mov_b32_e32 v2, v1
.LBB0_2:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_ashrrev_i32_e32 v3, 31, v2
s_add_i32 s4, s4, -1
s_cmp_eq_u32 s4, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[3:4], 2, v[2:3]
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v2, s5, v2
v_add_co_u32 v3, vcc_lo, s2, v3
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v4, vcc_lo
global_load_b32 v3, v[3:4], off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v0, v3, v0
s_cbranch_scc0 .LBB0_2
s_branch .LBB0_4
.LBB0_3:
v_mov_b32_e32 v0, 0
.LBB0_4:
s_load_b64 s[0:1], s[0:1], 0x8
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v2, 31, v1
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v1, vcc_lo, s0, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo
global_store_b32 v[1:2], v0, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z7sumColsPiS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z7sumColsPiS_ii, .Lfunc_end0-_Z7sumColsPiS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z7sumColsPiS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z7sumColsPiS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001aa1d6_00000000-6_sumCols.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z30__device_stub__Z7sumColsPiS_iiPiS_ii
.type _Z30__device_stub__Z7sumColsPiS_iiPiS_ii, @function
_Z30__device_stub__Z7sumColsPiS_iiPiS_ii:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z7sumColsPiS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z30__device_stub__Z7sumColsPiS_iiPiS_ii, .-_Z30__device_stub__Z7sumColsPiS_iiPiS_ii
.globl _Z7sumColsPiS_ii
.type _Z7sumColsPiS_ii, @function
_Z7sumColsPiS_ii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z7sumColsPiS_iiPiS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z7sumColsPiS_ii, .-_Z7sumColsPiS_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z7sumColsPiS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z7sumColsPiS_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "sumCols.hip"
.globl _Z22__device_stub__sumColsPiS_ii # -- Begin function _Z22__device_stub__sumColsPiS_ii
.p2align 4, 0x90
.type _Z22__device_stub__sumColsPiS_ii,@function
_Z22__device_stub__sumColsPiS_ii: # @_Z22__device_stub__sumColsPiS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z7sumColsPiS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z22__device_stub__sumColsPiS_ii, .Lfunc_end0-_Z22__device_stub__sumColsPiS_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7sumColsPiS_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z7sumColsPiS_ii,@object # @_Z7sumColsPiS_ii
.section .rodata,"a",@progbits
.globl _Z7sumColsPiS_ii
.p2align 3, 0x0
_Z7sumColsPiS_ii:
.quad _Z22__device_stub__sumColsPiS_ii
.size _Z7sumColsPiS_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z7sumColsPiS_ii"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__sumColsPiS_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z7sumColsPiS_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include<iostream>
#include<cstdlib>
#include<fstream>
#include<string>
#include<sys/time.h>
typedef unsigned long long int UINT;
using namespace std;
// Wavefront relaxation step: each thread updates one cell on the current
// anti-diagonal of the padded grid, replacing it with the integer mean of
// itself and its four neighbours (left, up, right, down), i.e. sum / 5.
// dev_table points at the first interior cell (the host passes a pointer
// already offset past the halo), startIdx is the flat index of the first cell
// on this diagonal, curjobs is the number of cells on the diagonal, and
// rowsize is the padded row stride.
// NOTE(review): startx and starty are passed in but never used here.
// The update is in-place; cells on one diagonal do not neighbour each other,
// and the host launches (and synchronizes) one diagonal at a time.
__global__ void GPU(int *dev_table, int startIdx, int curjobs, const int rowsize, int startx, int starty){
int thread = blockIdx.x * blockDim.x + threadIdx.x;
// Guard the ragged tail: thread count is rounded up to warp multiples.
if (thread < curjobs){
// Stepping along the diagonal moves one row down (+rowsize) and one
// column left (-1), hence a stride of (rowsize - 1) per thread.
int idx = startIdx + (thread * rowsize - thread);
dev_table[idx] = (dev_table[idx-1] + dev_table[idx-rowsize] + dev_table[idx]
+ dev_table[idx+1] + dev_table[idx+rowsize]) / 5;
}
}
// Abort the process when a CUDA runtime call reports failure.
//
// err: status code returned by a CUDA runtime API call.
//
// Fix: the diagnostic now goes to stderr instead of stdout, so it is not
// lost or interleaved when normal program output is redirected.
// NOTE(review): __FILE__/__LINE__ expand inside this helper, so the reported
// location is this function's, not the failing call site's; reporting the
// caller's line would require a wrapper macro (interface change, not done).
void checkGPUError(cudaError err){
    if (cudaSuccess != err){
        fprintf(stderr, "CUDA error in file %s, in line %i: %s\n", __FILE__, __LINE__, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
// Wavefront SOR-style smoothing of an n1 x n2 interior grid.
// arr must be a row-major (n1+2) x (n2+2) array including a 1-cell halo ring.
// Each anti-diagonal "level" of interior cells is relaxed by one kernel
// launch; cells on a diagonal are independent, while successive diagonals are
// serialized by the host loop.
// NOTE(review): results are left on the device -- the copy-back near the end
// is commented out, so callers never see the smoothed values in arr; confirm
// this is intentional.
void SOR(int n1, int n2, int *arr){
// One ring of halo cells on every side of the interior grid.
int paddsize = 1;
int rowsize = n1 + 2 * paddsize;
int colsize = n2 + 2 * paddsize;
int *dev_table;
size_t freeMem, totalMem;
cudaMemGetInfo(&freeMem, &totalMem);
// NOTE(review): int arithmetic can overflow for large grids; a size_t byte
// count would be safer -- TODO confirm the expected problem sizes.
int tablesize = rowsize * colsize;
cout << "current GPU memory info FREE: " << freeMem << " Bytes, Total: " << totalMem << " Bytes.";
cout << "colsize: " << colsize << ", rowsize: " << rowsize << ", allocates: " << tablesize * sizeof(int)<< " Bytes." << endl;
cudaError err = cudaMalloc(&dev_table, tablesize * sizeof(int));
checkGPUError(err);
// NOTE(review): this copy and the kernel launches below are not error-checked.
cudaMemcpy(dev_table, arr, tablesize * sizeof(int), cudaMemcpyHostToDevice);
// NOTE(review): maxthreads is computed but never used.
int maxthreads = min(n1 ,n2);
// Number of anti-diagonals in an n1 x n2 grid.
int maxlevel = n1 + n2 - 1;
int curlevel = 1;
int curjobs = 1;
// startx/starty are forwarded to the kernel but unused there (see GPU()).
int startx, starty;
int threadPerBlock = 128, blockPerGrid;
cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
//suppose n1 is the row size and the longer array
while(curlevel <= maxlevel){
// cout << "level: " << curlevel << endl;
int startIdx;
if (curlevel <= n1){
// Growing phase: the diagonal starts in the top interior row, gaining
// one cell per level.
startIdx = curlevel - 1;
curjobs = curlevel;
startx = startIdx;
starty = 0;
}
else{
// Shrinking phase: the diagonal starts in the last interior column
// and moves down one row per level.
startIdx = n1 - 1 + rowsize * (curlevel - n1);
// NOTE(review): this job count looks correct only when n1 == n2; for
// rectangular grids 2*n1 - curlevel can undercount or go negative --
// TODO confirm against the callers.
curjobs = 2 * n1 - curlevel;
startx = n1 - 1;
starty = curlevel - n1;
}
// Round the thread count up to a whole warp, then size the grid.
int numthreads = (curjobs + 31) / 32;
numthreads *= 32;
blockPerGrid = (numthreads + threadPerBlock - 1) / threadPerBlock;
// Offset past the halo so flat index 0 inside the kernel is the first
// interior cell.
GPU<<<blockPerGrid, threadPerBlock>>>(&dev_table[paddsize*rowsize+paddsize], startIdx, curjobs, rowsize, startx, starty);
// Wait for this diagonal to finish before launching the next one
// (successive diagonals have a data dependency).
cudaDeviceSynchronize();
curlevel++;
}
// cudaMemcpy(table, dev_table, (n1+paddsize)*rowsize*sizeof(int), cudaMemcpyDeviceToHost);
/*
//display table
cout << "full table: " << endl;
for (int i=0; i<n1+paddsize; i++){
for (int j=0; j<n2+paddsize; j++){
cout << table[i * rowsize + j] << " ";
}
cout << endl;
}
*/
cudaFree(dev_table);
} | code for sm_80
Function : _Z3GPUPiiiiii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x16c], PT ; /* 0x00005b0000007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ IMAD R0, R0, c[0x0][0x170], -R0 ; /* 0x00005c0000007a24 */
/* 0x000fe200078e0a00 */
/*0070*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0090*/ IADD3 R0, R0, c[0x0][0x168], RZ ; /* 0x00005a0000007a10 */
/* 0x000fc80007ffe0ff */
/*00a0*/ IADD3 R4, R0.reuse, -c[0x0][0x170], RZ ; /* 0x80005c0000047a10 */
/* 0x040fe40007ffe0ff */
/*00b0*/ IMAD.WIDE R2, R0, R7, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fc800078e0207 */
/*00c0*/ IMAD.WIDE R4, R4, R7, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fe200078e0207 */
/*00d0*/ LDG.E R9, [R2.64+-0x4] ; /* 0xfffffc0402097981 */
/* 0x000ea6000c1e1900 */
/*00e0*/ IMAD.WIDE R6, R7, c[0x0][0x170], R2 ; /* 0x00005c0007067a25 */
/* 0x000fe200078e0202 */
/*00f0*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea8000c1e1900 */
/*0100*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*0110*/ LDG.E R6, [R6.64] ; /* 0x0000000406067981 */
/* 0x000ee8000c1e1900 */
/*0120*/ LDG.E R11, [R2.64+0x4] ; /* 0x00000404020b7981 */
/* 0x000ee2000c1e1900 */
/*0130*/ IADD3 R0, R0, R4, R9 ; /* 0x0000000400007210 */
/* 0x004fc80007ffe009 */
/*0140*/ IADD3 R0, R6, R0, R11 ; /* 0x0000000006007210 */
/* 0x008fca0007ffe00b */
/*0150*/ IMAD.HI R0, R0, 0x66666667, RZ ; /* 0x6666666700007827 */
/* 0x000fca00078e02ff */
/*0160*/ SHF.R.S32.HI R9, RZ, 0x1, R0 ; /* 0x00000001ff097819 */
/* 0x000fc80000011400 */
/*0170*/ LEA.HI R9, R0, R9, RZ, 0x1 ; /* 0x0000000900097211 */
/* 0x000fca00078f08ff */
/*0180*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */
/* 0x000fe2000c101904 */
/*0190*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01a0*/ BRA 0x1a0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include<iostream>
#include<cstdlib>
#include<fstream>
#include<string>
#include<sys/time.h>
typedef unsigned long long int UINT;
using namespace std;
__global__ void GPU(int *dev_table, int startIdx, int curjobs, const int rowsize, int startx, int starty){
int thread = blockIdx.x * blockDim.x + threadIdx.x;
if (thread < curjobs){
int idx = startIdx + (thread * rowsize - thread);
dev_table[idx] = (dev_table[idx-1] + dev_table[idx-rowsize] + dev_table[idx]
+ dev_table[idx+1] + dev_table[idx+rowsize]) / 5;
}
}
void checkGPUError(cudaError err){
if (cudaSuccess != err){
printf("CUDA error in file %s, in line %i: %s\n", __FILE__, __LINE__, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
void SOR(int n1, int n2, int *arr){
int paddsize = 1;
int rowsize = n1 + 2 * paddsize;
int colsize = n2 + 2 * paddsize;
int *dev_table;
size_t freeMem, totalMem;
cudaMemGetInfo(&freeMem, &totalMem);
int tablesize = rowsize * colsize;
cout << "current GPU memory info FREE: " << freeMem << " Bytes, Total: " << totalMem << " Bytes.";
cout << "colsize: " << colsize << ", rowsize: " << rowsize << ", allocates: " << tablesize * sizeof(int)<< " Bytes." << endl;
cudaError err = cudaMalloc(&dev_table, tablesize * sizeof(int));
checkGPUError(err);
cudaMemcpy(dev_table, arr, tablesize * sizeof(int), cudaMemcpyHostToDevice);
int maxthreads = min(n1 ,n2);
int maxlevel = n1 + n2 - 1;
int curlevel = 1;
int curjobs = 1;
int startx, starty;
int threadPerBlock = 128, blockPerGrid;
cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
//suppose n1 is the row size and the longer array
while(curlevel <= maxlevel){
// cout << "level: " << curlevel << endl;
int startIdx;
if (curlevel <= n1){
startIdx = curlevel - 1;
curjobs = curlevel;
startx = startIdx;
starty = 0;
}
else{
startIdx = n1 - 1 + rowsize * (curlevel - n1);
curjobs = 2 * n1 - curlevel;
startx = n1 - 1;
starty = curlevel - n1;
}
int numthreads = (curjobs + 31) / 32;
numthreads *= 32;
blockPerGrid = (numthreads + threadPerBlock - 1) / threadPerBlock;
GPU<<<blockPerGrid, threadPerBlock>>>(&dev_table[paddsize*rowsize+paddsize], startIdx, curjobs, rowsize, startx, starty);
cudaDeviceSynchronize();
curlevel++;
}
// cudaMemcpy(table, dev_table, (n1+paddsize)*rowsize*sizeof(int), cudaMemcpyDeviceToHost);
/*
//display table
cout << "full table: " << endl;
for (int i=0; i<n1+paddsize; i++){
for (int j=0; j<n2+paddsize; j++){
cout << table[i * rowsize + j] << " ";
}
cout << endl;
}
*/
cudaFree(dev_table);
} | .file "tmpxft_000fca0f_00000000-6_GPU.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3804:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3804:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "/home/ubuntu/Datasets/stackv2/train-structured/zzzhe1990/GPUMemEfficient/master/2D-HEAT/GPU.cu"
.align 8
.LC1:
.string "CUDA error in file %s, in line %i: %s\n"
.text
.globl _Z13checkGPUError9cudaError
.type _Z13checkGPUError9cudaError, @function
_Z13checkGPUError9cudaError:
.LFB3800:
.cfi_startproc
endbr64
testl %edi, %edi
jne .L8
ret
.L8:
subq $8, %rsp
.cfi_def_cfa_offset 16
call cudaGetErrorString@PLT
movq %rax, %r8
movl $23, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.cfi_endproc
.LFE3800:
.size _Z13checkGPUError9cudaError, .-_Z13checkGPUError9cudaError
.globl _Z27__device_stub__Z3GPUPiiiiiiPiiiiii
.type _Z27__device_stub__Z3GPUPiiiiiiPiiiiii, @function
_Z27__device_stub__Z3GPUPiiiiiiPiiiiii:
.LFB3826:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 24(%rsp)
movl %esi, 20(%rsp)
movl %edx, 16(%rsp)
movl %ecx, 12(%rsp)
movl %r8d, 8(%rsp)
movl %r9d, 4(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 12(%rsp), %rax
movq %rax, 120(%rsp)
leaq 8(%rsp), %rax
movq %rax, 128(%rsp)
leaq 4(%rsp), %rax
movq %rax, 136(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L13
.L9:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L14
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L13:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 184
pushq 40(%rsp)
.cfi_def_cfa_offset 192
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3GPUPiiiiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L9
.L14:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3826:
.size _Z27__device_stub__Z3GPUPiiiiiiPiiiiii, .-_Z27__device_stub__Z3GPUPiiiiiiPiiiiii
.globl _Z3GPUPiiiiii
.type _Z3GPUPiiiiii, @function
_Z3GPUPiiiiii:
.LFB3827:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z3GPUPiiiiiiPiiiiii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3827:
.size _Z3GPUPiiiiii, .-_Z3GPUPiiiiii
.section .rodata.str1.8
.align 8
.LC2:
.string "current GPU memory info FREE: "
.section .rodata.str1.1,"aMS",@progbits,1
.LC3:
.string " Bytes, Total: "
.LC4:
.string " Bytes."
.LC5:
.string "colsize: "
.LC6:
.string ", rowsize: "
.LC7:
.string ", allocates: "
.text
.globl _Z3SORiiPi
.type _Z3SORiiPi, @function
_Z3SORiiPi:
.LFB3801:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $104, %rsp
.cfi_def_cfa_offset 160
movl %edi, %r15d
movl %esi, %ebx
movq %rdx, (%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leal 2(%rdi), %r13d
leal 2(%rsi), %r14d
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdi
call cudaMemGetInfo@PLT
movl %r13d, %eax
imull %r14d, %eax
movl %eax, 8(%rsp)
movl $30, %edx
leaq .LC2(%rip), %rsi
leaq _ZSt4cout(%rip), %rbp
movq %rbp, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq 48(%rsp), %rsi
movq %rbp, %rdi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %r12
movl $15, %edx
leaq .LC3(%rip), %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq 56(%rsp), %rsi
movq %r12, %rdi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
movl $7, %edx
leaq .LC4(%rip), %r12
movq %r12, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl $9, %edx
leaq .LC5(%rip), %rsi
movq %rbp, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl %r14d, %esi
movq %rbp, %rdi
call _ZNSolsEi@PLT
movq %rax, %rbp
movl $11, %edx
leaq .LC6(%rip), %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl %r13d, %esi
movq %rbp, %rdi
call _ZNSolsEi@PLT
movq %rax, %rbp
movl $13, %edx
leaq .LC7(%rip), %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movslq 8(%rsp), %r14
salq $2, %r14
movq %r14, %rsi
movq %rbp, %rdi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rbp
movl $7, %edx
movq %r12, %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq 0(%rbp), %rax
movq -24(%rax), %rax
movq 240(%rbp,%rax), %r12
testq %r12, %r12
je .L30
cmpb $0, 56(%r12)
je .L20
movzbl 67(%r12), %esi
.L21:
movsbl %sil, %esi
movq %rbp, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
leaq 40(%rsp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
movl %eax, %edi
call _Z13checkGPUError9cudaError
movl $1, %ecx
movq %r14, %rdx
movq (%rsp), %rsi
movq 40(%rsp), %rdi
call cudaMemcpy@PLT
leal (%r15,%rbx), %eax
movl %eax, %ebx
movl %eax, 16(%rsp)
movl $2, %edi
call cudaDeviceSetCacheConfig@PLT
cmpl $1, %ebx
jle .L22
leal 3(%r15), %eax
cltq
salq $2, %rax
movq %rax, 24(%rsp)
leal -1(%r15,%r15), %r12d
movl $1, %eax
subl %r15d, %eax
imull %r13d, %eax
leal -1(%r15,%rax), %r14d
movl $1, %ebx
leal -1(%r15), %eax
movl %eax, 20(%rsp)
jmp .L26
.L30:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L31
call _ZSt16__throw_bad_castv@PLT
.L31:
call __stack_chk_fail@PLT
.L20:
movq %r12, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%r12), %rax
movl $10, %esi
movq %r12, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L21
.L23:
movl 20(%rsp), %eax
movl %eax, (%rsp)
movl %r15d, %eax
subl %r12d, %eax
movl %eax, 8(%rsp)
movl %r14d, 12(%rsp)
movl %r12d, %ebp
.L24:
leal 62(%rbp), %eax
movl %ebp, %edx
addl $31, %edx
cmovns %edx, %eax
movl $128, 76(%rsp)
movl $1, 80(%rsp)
andl $-32, %eax
leal 254(%rax), %edx
addl $127, %eax
cmovs %edx, %eax
sarl $7, %eax
movl %eax, 64(%rsp)
movl $1, 68(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 76(%rsp), %rdx
movl $1, %ecx
movq 64(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L32
.L25:
call cudaDeviceSynchronize@PLT
addl $1, %ebx
subl $1, %r12d
addl %r13d, %r14d
cmpl %ebx, 16(%rsp)
je .L22
.L26:
cmpl %ebx, %r15d
jl .L23
leal -1(%rbx), %eax
movl %eax, (%rsp)
movl %eax, 12(%rsp)
movl %ebx, %ebp
movl $0, 8(%rsp)
jmp .L24
.L32:
movq 24(%rsp), %rdi
addq 40(%rsp), %rdi
movl 8(%rsp), %r9d
movl (%rsp), %r8d
movl %r13d, %ecx
movl %ebp, %edx
movl 12(%rsp), %esi
call _Z27__device_stub__Z3GPUPiiiiiiPiiiiii
jmp .L25
.L22:
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L33
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L33:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3801:
.size _Z3SORiiPi, .-_Z3SORiiPi
.section .rodata.str1.1
.LC8:
.string "_Z3GPUPiiiiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3829:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z3GPUPiiiiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3829:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include<iostream>
#include<cstdlib>
#include<fstream>
#include<string>
#include<sys/time.h>
typedef unsigned long long int UINT;
using namespace std;
__global__ void GPU(int *dev_table, int startIdx, int curjobs, const int rowsize, int startx, int starty){
int thread = blockIdx.x * blockDim.x + threadIdx.x;
if (thread < curjobs){
int idx = startIdx + (thread * rowsize - thread);
dev_table[idx] = (dev_table[idx-1] + dev_table[idx-rowsize] + dev_table[idx]
+ dev_table[idx+1] + dev_table[idx+rowsize]) / 5;
}
}
void checkGPUError(cudaError err){
if (cudaSuccess != err){
printf("CUDA error in file %s, in line %i: %s\n", __FILE__, __LINE__, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
void SOR(int n1, int n2, int *arr){
int paddsize = 1;
int rowsize = n1 + 2 * paddsize;
int colsize = n2 + 2 * paddsize;
int *dev_table;
size_t freeMem, totalMem;
cudaMemGetInfo(&freeMem, &totalMem);
int tablesize = rowsize * colsize;
cout << "current GPU memory info FREE: " << freeMem << " Bytes, Total: " << totalMem << " Bytes.";
cout << "colsize: " << colsize << ", rowsize: " << rowsize << ", allocates: " << tablesize * sizeof(int)<< " Bytes." << endl;
cudaError err = cudaMalloc(&dev_table, tablesize * sizeof(int));
checkGPUError(err);
cudaMemcpy(dev_table, arr, tablesize * sizeof(int), cudaMemcpyHostToDevice);
int maxthreads = min(n1 ,n2);
int maxlevel = n1 + n2 - 1;
int curlevel = 1;
int curjobs = 1;
int startx, starty;
int threadPerBlock = 128, blockPerGrid;
cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
//suppose n1 is the row size and the longer array
while(curlevel <= maxlevel){
// cout << "level: " << curlevel << endl;
int startIdx;
if (curlevel <= n1){
startIdx = curlevel - 1;
curjobs = curlevel;
startx = startIdx;
starty = 0;
}
else{
startIdx = n1 - 1 + rowsize * (curlevel - n1);
curjobs = 2 * n1 - curlevel;
startx = n1 - 1;
starty = curlevel - n1;
}
int numthreads = (curjobs + 31) / 32;
numthreads *= 32;
blockPerGrid = (numthreads + threadPerBlock - 1) / threadPerBlock;
GPU<<<blockPerGrid, threadPerBlock>>>(&dev_table[paddsize*rowsize+paddsize], startIdx, curjobs, rowsize, startx, starty);
cudaDeviceSynchronize();
curlevel++;
}
// cudaMemcpy(table, dev_table, (n1+paddsize)*rowsize*sizeof(int), cudaMemcpyDeviceToHost);
/*
//display table
cout << "full table: " << endl;
for (int i=0; i<n1+paddsize; i++){
for (int j=0; j<n2+paddsize; j++){
cout << table[i * rowsize + j] << " ";
}
cout << endl;
}
*/
cudaFree(dev_table);
} | #include <hip/hip_runtime.h>
#include<iostream>
#include<cstdlib>
#include<fstream>
#include<string>
#include<sys/time.h>
typedef unsigned long long int UINT;
using namespace std;
__global__ void GPU(int *dev_table, int startIdx, int curjobs, const int rowsize, int startx, int starty){
int thread = blockIdx.x * blockDim.x + threadIdx.x;
if (thread < curjobs){
int idx = startIdx + (thread * rowsize - thread);
dev_table[idx] = (dev_table[idx-1] + dev_table[idx-rowsize] + dev_table[idx]
+ dev_table[idx+1] + dev_table[idx+rowsize]) / 5;
}
}
void checkGPUError(hipError_t err){
if (hipSuccess != err){
printf("CUDA error in file %s, in line %i: %s\n", __FILE__, __LINE__, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
void SOR(int n1, int n2, int *arr){
int paddsize = 1;
int rowsize = n1 + 2 * paddsize;
int colsize = n2 + 2 * paddsize;
int *dev_table;
size_t freeMem, totalMem;
hipMemGetInfo(&freeMem, &totalMem);
int tablesize = rowsize * colsize;
cout << "current GPU memory info FREE: " << freeMem << " Bytes, Total: " << totalMem << " Bytes.";
cout << "colsize: " << colsize << ", rowsize: " << rowsize << ", allocates: " << tablesize * sizeof(int)<< " Bytes." << endl;
hipError_t err = hipMalloc(&dev_table, tablesize * sizeof(int));
checkGPUError(err);
hipMemcpy(dev_table, arr, tablesize * sizeof(int), hipMemcpyHostToDevice);
int maxthreads = min(n1 ,n2);
int maxlevel = n1 + n2 - 1;
int curlevel = 1;
int curjobs = 1;
int startx, starty;
int threadPerBlock = 128, blockPerGrid;
hipDeviceSetCacheConfig(hipFuncCachePreferL1);
//suppose n1 is the row size and the longer array
while(curlevel <= maxlevel){
// cout << "level: " << curlevel << endl;
int startIdx;
if (curlevel <= n1){
startIdx = curlevel - 1;
curjobs = curlevel;
startx = startIdx;
starty = 0;
}
else{
startIdx = n1 - 1 + rowsize * (curlevel - n1);
curjobs = 2 * n1 - curlevel;
startx = n1 - 1;
starty = curlevel - n1;
}
int numthreads = (curjobs + 31) / 32;
numthreads *= 32;
blockPerGrid = (numthreads + threadPerBlock - 1) / threadPerBlock;
GPU<<<blockPerGrid, threadPerBlock>>>(&dev_table[paddsize*rowsize+paddsize], startIdx, curjobs, rowsize, startx, starty);
hipDeviceSynchronize();
curlevel++;
}
// cudaMemcpy(table, dev_table, (n1+paddsize)*rowsize*sizeof(int), cudaMemcpyDeviceToHost);
/*
//display table
cout << "full table: " << endl;
for (int i=0; i<n1+paddsize; i++){
for (int j=0; j<n2+paddsize; j++){
cout << table[i * rowsize + j] << " ";
}
cout << endl;
}
*/
hipFree(dev_table);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include<iostream>
#include<cstdlib>
#include<fstream>
#include<string>
#include<sys/time.h>
typedef unsigned long long int UINT;
using namespace std;
__global__ void GPU(int *dev_table, int startIdx, int curjobs, const int rowsize, int startx, int starty){
int thread = blockIdx.x * blockDim.x + threadIdx.x;
if (thread < curjobs){
int idx = startIdx + (thread * rowsize - thread);
dev_table[idx] = (dev_table[idx-1] + dev_table[idx-rowsize] + dev_table[idx]
+ dev_table[idx+1] + dev_table[idx+rowsize]) / 5;
}
}
void checkGPUError(hipError_t err){
if (hipSuccess != err){
printf("CUDA error in file %s, in line %i: %s\n", __FILE__, __LINE__, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
void SOR(int n1, int n2, int *arr){
int paddsize = 1;
int rowsize = n1 + 2 * paddsize;
int colsize = n2 + 2 * paddsize;
int *dev_table;
size_t freeMem, totalMem;
hipMemGetInfo(&freeMem, &totalMem);
int tablesize = rowsize * colsize;
cout << "current GPU memory info FREE: " << freeMem << " Bytes, Total: " << totalMem << " Bytes.";
cout << "colsize: " << colsize << ", rowsize: " << rowsize << ", allocates: " << tablesize * sizeof(int)<< " Bytes." << endl;
hipError_t err = hipMalloc(&dev_table, tablesize * sizeof(int));
checkGPUError(err);
hipMemcpy(dev_table, arr, tablesize * sizeof(int), hipMemcpyHostToDevice);
int maxthreads = min(n1 ,n2);
int maxlevel = n1 + n2 - 1;
int curlevel = 1;
int curjobs = 1;
int startx, starty;
int threadPerBlock = 128, blockPerGrid;
hipDeviceSetCacheConfig(hipFuncCachePreferL1);
//suppose n1 is the row size and the longer array
while(curlevel <= maxlevel){
// cout << "level: " << curlevel << endl;
int startIdx;
if (curlevel <= n1){
startIdx = curlevel - 1;
curjobs = curlevel;
startx = startIdx;
starty = 0;
}
else{
startIdx = n1 - 1 + rowsize * (curlevel - n1);
curjobs = 2 * n1 - curlevel;
startx = n1 - 1;
starty = curlevel - n1;
}
int numthreads = (curjobs + 31) / 32;
numthreads *= 32;
blockPerGrid = (numthreads + threadPerBlock - 1) / threadPerBlock;
GPU<<<blockPerGrid, threadPerBlock>>>(&dev_table[paddsize*rowsize+paddsize], startIdx, curjobs, rowsize, startx, starty);
hipDeviceSynchronize();
curlevel++;
}
// cudaMemcpy(table, dev_table, (n1+paddsize)*rowsize*sizeof(int), cudaMemcpyDeviceToHost);
/*
//display table
cout << "full table: " << endl;
for (int i=0; i<n1+paddsize; i++){
for (int j=0; j<n2+paddsize; j++){
cout << table[i * rowsize + j] << " ";
}
cout << endl;
}
*/
hipFree(dev_table);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3GPUPiiiiii
.globl _Z3GPUPiiiiii
.p2align 8
.type _Z3GPUPiiiiii,@function
_Z3GPUPiiiiii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0xc
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_clause 0x2
s_load_b32 s3, s[0:1], 0x10
s_load_b32 s2, s[0:1], 0x8
s_load_b64 s[0:1], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_add_i32 s4, s3, -1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, v1, s4, s[2:3]
v_add_nc_u32_e32 v4, s3, v2
v_ashrrev_i32_e32 v3, 31, v2
v_subrev_nc_u32_e32 v0, s3, v2
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b64 v[2:3], 2, v[2:3]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_ashrrev_i32_e32 v1, 31, v0
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
v_lshlrev_b64 v[0:1], 2, v[0:1]
v_add_co_u32 v2, vcc_lo, s0, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_4)
v_add_co_u32 v4, vcc_lo, s0, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v5, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_clause 0x3
global_load_b32 v6, v[2:3], off offset:-4
global_load_b32 v4, v[4:5], off
global_load_b32 v5, v[0:1], off
global_load_b64 v[0:1], v[2:3], off
s_waitcnt vmcnt(0)
v_add3_u32 v0, v5, v6, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add3_u32 v0, v0, v1, v4
v_mul_hi_i32 v0, v0, 0x66666667
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshrrev_b32_e32 v1, 31, v0
v_ashrrev_i32_e32 v0, 1, v0
v_add_nc_u32_e32 v0, v0, v1
global_store_b32 v[2:3], v0, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3GPUPiiiiii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3GPUPiiiiii, .Lfunc_end0-_Z3GPUPiiiiii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3GPUPiiiiii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3GPUPiiiiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include<iostream>
#include<cstdlib>
#include<fstream>
#include<string>
#include<sys/time.h>
typedef unsigned long long int UINT;
using namespace std;
__global__ void GPU(int *dev_table, int startIdx, int curjobs, const int rowsize, int startx, int starty){
int thread = blockIdx.x * blockDim.x + threadIdx.x;
if (thread < curjobs){
int idx = startIdx + (thread * rowsize - thread);
dev_table[idx] = (dev_table[idx-1] + dev_table[idx-rowsize] + dev_table[idx]
+ dev_table[idx+1] + dev_table[idx+rowsize]) / 5;
}
}
void checkGPUError(hipError_t err){
if (hipSuccess != err){
printf("CUDA error in file %s, in line %i: %s\n", __FILE__, __LINE__, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
// Wavefront SOR driver: relaxes an n1 x n2 grid (supplied in `arr` with a
// one-cell halo on every side, i.e. (n1+2) x (n2+2) ints) by sweeping
// anti-diagonals ("levels") on the GPU, one kernel launch per diagonal.
// NOTE(review): the level bookkeeping below looks like it assumes n1 == n2 —
// for curlevel <= n1 it sets curjobs = curlevel without capping at n2, and the
// second branch uses 2*n1 - curlevel rather than n1 + n2 - curlevel. Confirm
// callers only pass square grids.
void SOR(int n1, int n2, int *arr){
	int paddsize = 1;                       // halo width on each side
	int rowsize = n1 + 2 * paddsize;        // padded row length
	int colsize = n2 + 2 * paddsize;        // padded number of rows
	int *dev_table;
	size_t freeMem, totalMem;
	hipMemGetInfo(&freeMem, &totalMem);
	int tablesize = rowsize * colsize;      // total padded cells
	cout << "current GPU memory info FREE: " << freeMem << " Bytes, Total: " << totalMem << " Bytes.";
	cout << "colsize: " << colsize << ", rowsize: " << rowsize << ", allocates: " << tablesize * sizeof(int)<< " Bytes." << endl;
	hipError_t err = hipMalloc(&dev_table, tablesize * sizeof(int));
	checkGPUError(err);
	// NOTE(review): this hipMemcpy (and each kernel launch below) is
	// unchecked; consider routing the results through checkGPUError too.
	hipMemcpy(dev_table, arr, tablesize * sizeof(int), hipMemcpyHostToDevice);
	int maxthreads = min(n1 ,n2);           // NOTE(review): computed but never used
	int maxlevel = n1 + n2 - 1;             // number of anti-diagonals in an n1 x n2 grid
	int curlevel = 1;                       // current diagonal, 1-based
	int curjobs = 1;                        // cells on the current diagonal
	int startx, starty;                     // grid coordinates of the diagonal's first cell
	int threadPerBlock = 128, blockPerGrid;
	hipDeviceSetCacheConfig(hipFuncCachePreferL1);
	//suppose n1 is the row size and the longer array
	while(curlevel <= maxlevel){
		// cout << "level: " << curlevel << endl;
		int startIdx;                   // flat offset of the diagonal's first cell, relative to the interior origin
		if (curlevel <= n1){
			// Diagonal starts on the top row of the interior.
			startIdx = curlevel - 1;
			curjobs = curlevel;
			startx = startIdx;
			starty = 0;
		}
		else{
			// Diagonal starts on the right edge; each further level
			// moves the start one row down.
			startIdx = n1 - 1 + rowsize * (curlevel - n1);
			curjobs = 2 * n1 - curlevel;
			startx = n1 - 1;
			starty = curlevel - n1;
		}
		// Round the thread count up to a multiple of 32 (warp size),
		// then size the grid for 128-thread blocks.
		int numthreads = (curjobs + 31) / 32;
		numthreads *= 32;
		blockPerGrid = (numthreads + threadPerBlock - 1) / threadPerBlock;
		// Base pointer is offset past the halo so the kernel indexes
		// relative to the interior origin.
		GPU<<<blockPerGrid, threadPerBlock>>>(&dev_table[paddsize*rowsize+paddsize], startIdx, curjobs, rowsize, startx, starty);
		// NOTE(review): launches on the same stream already serialize, so
		// this per-level sync is presumably for error surfacing/timing
		// rather than ordering — confirm before removing.
		hipDeviceSynchronize();
		curlevel++;
	}
//	cudaMemcpy(table, dev_table, (n1+paddsize)*rowsize*sizeof(int), cudaMemcpyDeviceToHost);
	/*
	//display table
	cout << "full table: " << endl;
	for (int i=0; i<n1+paddsize; i++){
		for (int j=0; j<n2+paddsize; j++){
			cout << table[i * rowsize + j] << " ";
		}
		cout << endl;
	}
	*/
	hipFree(dev_table);
}
.file "GPU.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z18__device_stub__GPUPiiiiii # -- Begin function _Z18__device_stub__GPUPiiiiii
.p2align 4, 0x90
.type _Z18__device_stub__GPUPiiiiii,@function
_Z18__device_stub__GPUPiiiiii: # @_Z18__device_stub__GPUPiiiiii
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 72(%rsp)
movl %esi, 20(%rsp)
movl %edx, 16(%rsp)
movl %ecx, 12(%rsp)
movl %r8d, 8(%rsp)
movl %r9d, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 20(%rsp), %rax
movq %rax, 88(%rsp)
leaq 16(%rsp), %rax
movq %rax, 96(%rsp)
leaq 12(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3GPUPiiiiii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end0:
.size _Z18__device_stub__GPUPiiiiii, .Lfunc_end0-_Z18__device_stub__GPUPiiiiii
.cfi_endproc
# -- End function
.globl _Z13checkGPUError10hipError_t # -- Begin function _Z13checkGPUError10hipError_t
.p2align 4, 0x90
.type _Z13checkGPUError10hipError_t,@function
_Z13checkGPUError10hipError_t: # @_Z13checkGPUError10hipError_t
.cfi_startproc
# %bb.0:
testl %edi, %edi
jne .LBB1_2
# %bb.1:
retq
.LBB1_2:
pushq %rax
.cfi_def_cfa_offset 16
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $25, %edx
movq %rax, %rcx
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.Lfunc_end1:
.size _Z13checkGPUError10hipError_t, .Lfunc_end1-_Z13checkGPUError10hipError_t
.cfi_endproc
# -- End function
.globl _Z3SORiiPi # -- Begin function _Z3SORiiPi
.p2align 4, 0x90
.type _Z3SORiiPi,@function
_Z3SORiiPi: # @_Z3SORiiPi
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $216, %rsp
.cfi_def_cfa_offset 272
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rdx, %r14
# kill: def $esi killed $esi def $rsi
# kill: def $edi killed $edi def $rdi
movq %rdi, 8(%rsp) # 8-byte Spill
leal 2(%rdi), %ebp
movq %rsi, %rbx
leal 2(%rsi), %r12d
leaq 96(%rsp), %rdi
leaq 88(%rsp), %rsi
callq hipMemGetInfo
movl %r12d, %r13d
imull %ebp, %r13d
movl $_ZSt4cout, %edi
movl $.L.str.2, %esi
movl $30, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq 96(%rsp), %rsi
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertImEERSoT_
movq %rax, %r15
movl $.L.str.3, %esi
movl $15, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq 88(%rsp), %rsi
movq %r15, %rdi
callq _ZNSo9_M_insertImEERSoT_
movl $.L.str.4, %esi
movl $7, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl $.L.str.5, %esi
movl $9, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl %r12d, %esi
callq _ZNSolsEi
movq %rax, %r15
movl $.L.str.6, %esi
movl $11, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %r15, %rdi
movl %ebp, 4(%rsp) # 4-byte Spill
movl %ebp, %esi
callq _ZNSolsEi
movq %rax, %r15
movl $.L.str.7, %esi
movl $13, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movslq %r13d, %r12
shlq $2, %r12
movq %r15, %rdi
movq %r12, %rsi
callq _ZNSo9_M_insertImEERSoT_
movq %rax, %r13
movl $.L.str.4, %esi
movl $7, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq (%r13), %rax
movq -24(%rax), %rax
movq 240(%r13,%rax), %r15
testq %r15, %r15
je .LBB2_14
# %bb.1: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%r15)
je .LBB2_3
# %bb.2:
movzbl 67(%r15), %eax
jmp .LBB2_4
.LBB2_3:
movq %r15, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r15), %rax
movq %r15, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_4: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movq %r13, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
leaq 16(%rsp), %rdi
movq %r12, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB2_15
# %bb.5: # %_Z13checkGPUError10hipError_t.exit
movq 16(%rsp), %rdi
movl $1, %ebp
movq %r14, %rsi
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
addl 8(%rsp), %ebx # 4-byte Folded Reload
movl $2, %edi
callq hipDeviceSetCacheConfig
movq %rbx, 80(%rsp) # 8-byte Spill
cmpl $2, %ebx
jl .LBB2_13
# %bb.6: # %.lr.ph
movabsq $4294967296, %rdx # imm = 0x100000000
movq 8(%rsp), %rcx # 8-byte Reload
leal -1(%rcx), %eax
movl %eax, 32(%rsp) # 4-byte Spill
leal (%rcx,%rcx), %eax
movl %eax, 28(%rsp) # 4-byte Spill
movslq %ecx, %rax
movq %rax, 56(%rsp) # 8-byte Spill
movl %ecx, %eax
negl %eax
movq %rax, 64(%rsp) # 8-byte Spill
movl $1, %eax
subl %ecx, %eax
imull 4(%rsp), %eax # 4-byte Folded Reload
leal (%rcx,%rax), %r12d
decl %r12d
leaq 128(%rdx), %rax
movq %rax, 72(%rsp) # 8-byte Spill
jmp .LBB2_7
.p2align 4, 0x90
.LBB2_12: # in Loop: Header=BB2_7 Depth=1
callq hipDeviceSynchronize
incl %ebp
addl 4(%rsp), %r12d # 4-byte Folded Reload
cmpl %ebp, 80(%rsp) # 4-byte Folded Reload
je .LBB2_13
.LBB2_7: # =>This Inner Loop Header: Depth=1
cmpl 8(%rsp), %ebp # 4-byte Folded Reload
jle .LBB2_8
# %bb.9: # in Loop: Header=BB2_7 Depth=1
movq 64(%rsp), %rax # 8-byte Reload
leal (%rax,%rbp), %ebx
movl 28(%rsp), %eax # 4-byte Reload
movl %eax, %r13d
subl %ebp, %r13d
movl %r12d, %r14d
movl 32(%rsp), %r15d # 4-byte Reload
jmp .LBB2_10
.p2align 4, 0x90
.LBB2_8: # in Loop: Header=BB2_7 Depth=1
leal -1(%rbp), %r14d
xorl %ebx, %ebx
movl %r14d, %r15d
movl %ebp, %r13d
.LBB2_10: # in Loop: Header=BB2_7 Depth=1
leal 31(%r13), %eax
leal 62(%r13), %edi
testl %eax, %eax
cmovnsl %eax, %edi
andl $-32, %edi
leal 127(%rdi), %eax
addl $254, %edi
testl %eax, %eax
cmovnsl %eax, %edi
sarl $7, %edi
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %rdi
movl $1, %esi
movq 72(%rsp), %rdx # 8-byte Reload
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_12
# %bb.11: # in Loop: Header=BB2_7 Depth=1
movq 16(%rsp), %rax
movq 56(%rsp), %rcx # 8-byte Reload
leaq (%rax,%rcx,4), %rax
addq $12, %rax
movq %rax, 152(%rsp)
movl %r14d, 52(%rsp)
movl %r13d, 48(%rsp)
movl 4(%rsp), %eax # 4-byte Reload
movl %eax, 44(%rsp)
movl %r15d, 40(%rsp)
movl %ebx, 36(%rsp)
leaq 152(%rsp), %rax
movq %rax, 160(%rsp)
leaq 52(%rsp), %rax
movq %rax, 168(%rsp)
leaq 48(%rsp), %rax
movq %rax, 176(%rsp)
leaq 44(%rsp), %rax
movq %rax, 184(%rsp)
leaq 40(%rsp), %rax
movq %rax, 192(%rsp)
leaq 36(%rsp), %rax
movq %rax, 200(%rsp)
leaq 136(%rsp), %rdi
leaq 120(%rsp), %rsi
leaq 112(%rsp), %rdx
leaq 104(%rsp), %rcx
callq __hipPopCallConfiguration
movq 136(%rsp), %rsi
movl 144(%rsp), %edx
movq 120(%rsp), %rcx
movl 128(%rsp), %r8d
movl $_Z3GPUPiiiiii, %edi
leaq 160(%rsp), %r9
pushq 104(%rsp)
.cfi_adjust_cfa_offset 8
pushq 120(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB2_12
.LBB2_13: # %._crit_edge
movq 16(%rsp), %rdi
callq hipFree
addq $216, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB2_14:
.cfi_def_cfa_offset 272
callq _ZSt16__throw_bad_castv
.LBB2_15:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $25, %edx
movq %rax, %rcx
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.Lfunc_end2:
.size _Z3SORiiPi, .Lfunc_end2-_Z3SORiiPi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3GPUPiiiiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3GPUPiiiiii,@object # @_Z3GPUPiiiiii
.section .rodata,"a",@progbits
.globl _Z3GPUPiiiiii
.p2align 3, 0x0
_Z3GPUPiiiiii:
.quad _Z18__device_stub__GPUPiiiiii
.size _Z3GPUPiiiiii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "CUDA error in file %s, in line %i: %s\n"
.size .L.str, 39
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/zzzhe1990/GPUMemEfficient/master/2D-HEAT/GPU.hip"
.size .L.str.1, 106
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "current GPU memory info FREE: "
.size .L.str.2, 31
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz " Bytes, Total: "
.size .L.str.3, 16
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz " Bytes."
.size .L.str.4, 8
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "colsize: "
.size .L.str.5, 10
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz ", rowsize: "
.size .L.str.6, 12
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz ", allocates: "
.size .L.str.7, 14
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3GPUPiiiiii"
.size .L__unnamed_1, 14
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__GPUPiiiiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3GPUPiiiiii
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z3GPUPiiiiii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x16c], PT ; /* 0x00005b0000007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ IMAD R0, R0, c[0x0][0x170], -R0 ; /* 0x00005c0000007a24 */
/* 0x000fe200078e0a00 */
/*0070*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0090*/ IADD3 R0, R0, c[0x0][0x168], RZ ; /* 0x00005a0000007a10 */
/* 0x000fc80007ffe0ff */
/*00a0*/ IADD3 R4, R0.reuse, -c[0x0][0x170], RZ ; /* 0x80005c0000047a10 */
/* 0x040fe40007ffe0ff */
/*00b0*/ IMAD.WIDE R2, R0, R7, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fc800078e0207 */
/*00c0*/ IMAD.WIDE R4, R4, R7, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fe200078e0207 */
/*00d0*/ LDG.E R9, [R2.64+-0x4] ; /* 0xfffffc0402097981 */
/* 0x000ea6000c1e1900 */
/*00e0*/ IMAD.WIDE R6, R7, c[0x0][0x170], R2 ; /* 0x00005c0007067a25 */
/* 0x000fe200078e0202 */
/*00f0*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea8000c1e1900 */
/*0100*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*0110*/ LDG.E R6, [R6.64] ; /* 0x0000000406067981 */
/* 0x000ee8000c1e1900 */
/*0120*/ LDG.E R11, [R2.64+0x4] ; /* 0x00000404020b7981 */
/* 0x000ee2000c1e1900 */
/*0130*/ IADD3 R0, R0, R4, R9 ; /* 0x0000000400007210 */
/* 0x004fc80007ffe009 */
/*0140*/ IADD3 R0, R6, R0, R11 ; /* 0x0000000006007210 */
/* 0x008fca0007ffe00b */
/*0150*/ IMAD.HI R0, R0, 0x66666667, RZ ; /* 0x6666666700007827 */
/* 0x000fca00078e02ff */
/*0160*/ SHF.R.S32.HI R9, RZ, 0x1, R0 ; /* 0x00000001ff097819 */
/* 0x000fc80000011400 */
/*0170*/ LEA.HI R9, R0, R9, RZ, 0x1 ; /* 0x0000000900097211 */
/* 0x000fca00078f08ff */
/*0180*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */
/* 0x000fe2000c101904 */
/*0190*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01a0*/ BRA 0x1a0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3GPUPiiiiii
.globl _Z3GPUPiiiiii
.p2align 8
.type _Z3GPUPiiiiii,@function
_Z3GPUPiiiiii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0xc
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_clause 0x2
s_load_b32 s3, s[0:1], 0x10
s_load_b32 s2, s[0:1], 0x8
s_load_b64 s[0:1], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_add_i32 s4, s3, -1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, v1, s4, s[2:3]
v_add_nc_u32_e32 v4, s3, v2
v_ashrrev_i32_e32 v3, 31, v2
v_subrev_nc_u32_e32 v0, s3, v2
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b64 v[2:3], 2, v[2:3]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_ashrrev_i32_e32 v1, 31, v0
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
v_lshlrev_b64 v[0:1], 2, v[0:1]
v_add_co_u32 v2, vcc_lo, s0, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_4)
v_add_co_u32 v4, vcc_lo, s0, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v5, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_clause 0x3
global_load_b32 v6, v[2:3], off offset:-4
global_load_b32 v4, v[4:5], off
global_load_b32 v5, v[0:1], off
global_load_b64 v[0:1], v[2:3], off
s_waitcnt vmcnt(0)
v_add3_u32 v0, v5, v6, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add3_u32 v0, v0, v1, v4
v_mul_hi_i32 v0, v0, 0x66666667
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshrrev_b32_e32 v1, 31, v0
v_ashrrev_i32_e32 v0, 1, v0
v_add_nc_u32_e32 v0, v0, v1
global_store_b32 v[2:3], v0, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3GPUPiiiiii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3GPUPiiiiii, .Lfunc_end0-_Z3GPUPiiiiii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3GPUPiiiiii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3GPUPiiiiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000fca0f_00000000-6_GPU.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3804:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3804:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "/home/ubuntu/Datasets/stackv2/train-structured/zzzhe1990/GPUMemEfficient/master/2D-HEAT/GPU.cu"
.align 8
.LC1:
.string "CUDA error in file %s, in line %i: %s\n"
.text
.globl _Z13checkGPUError9cudaError
.type _Z13checkGPUError9cudaError, @function
_Z13checkGPUError9cudaError:
.LFB3800:
.cfi_startproc
endbr64
testl %edi, %edi
jne .L8
ret
.L8:
subq $8, %rsp
.cfi_def_cfa_offset 16
call cudaGetErrorString@PLT
movq %rax, %r8
movl $23, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.cfi_endproc
.LFE3800:
.size _Z13checkGPUError9cudaError, .-_Z13checkGPUError9cudaError
.globl _Z27__device_stub__Z3GPUPiiiiiiPiiiiii
.type _Z27__device_stub__Z3GPUPiiiiiiPiiiiii, @function
_Z27__device_stub__Z3GPUPiiiiiiPiiiiii:
.LFB3826:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 24(%rsp)
movl %esi, 20(%rsp)
movl %edx, 16(%rsp)
movl %ecx, 12(%rsp)
movl %r8d, 8(%rsp)
movl %r9d, 4(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 12(%rsp), %rax
movq %rax, 120(%rsp)
leaq 8(%rsp), %rax
movq %rax, 128(%rsp)
leaq 4(%rsp), %rax
movq %rax, 136(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L13
.L9:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L14
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L13:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 184
pushq 40(%rsp)
.cfi_def_cfa_offset 192
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3GPUPiiiiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L9
.L14:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3826:
.size _Z27__device_stub__Z3GPUPiiiiiiPiiiiii, .-_Z27__device_stub__Z3GPUPiiiiiiPiiiiii
.globl _Z3GPUPiiiiii
.type _Z3GPUPiiiiii, @function
_Z3GPUPiiiiii:
.LFB3827:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z3GPUPiiiiiiPiiiiii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3827:
.size _Z3GPUPiiiiii, .-_Z3GPUPiiiiii
.section .rodata.str1.8
.align 8
.LC2:
.string "current GPU memory info FREE: "
.section .rodata.str1.1,"aMS",@progbits,1
.LC3:
.string " Bytes, Total: "
.LC4:
.string " Bytes."
.LC5:
.string "colsize: "
.LC6:
.string ", rowsize: "
.LC7:
.string ", allocates: "
.text
.globl _Z3SORiiPi
.type _Z3SORiiPi, @function
_Z3SORiiPi:
.LFB3801:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $104, %rsp
.cfi_def_cfa_offset 160
movl %edi, %r15d
movl %esi, %ebx
movq %rdx, (%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leal 2(%rdi), %r13d
leal 2(%rsi), %r14d
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdi
call cudaMemGetInfo@PLT
movl %r13d, %eax
imull %r14d, %eax
movl %eax, 8(%rsp)
movl $30, %edx
leaq .LC2(%rip), %rsi
leaq _ZSt4cout(%rip), %rbp
movq %rbp, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq 48(%rsp), %rsi
movq %rbp, %rdi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %r12
movl $15, %edx
leaq .LC3(%rip), %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq 56(%rsp), %rsi
movq %r12, %rdi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
movl $7, %edx
leaq .LC4(%rip), %r12
movq %r12, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl $9, %edx
leaq .LC5(%rip), %rsi
movq %rbp, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl %r14d, %esi
movq %rbp, %rdi
call _ZNSolsEi@PLT
movq %rax, %rbp
movl $11, %edx
leaq .LC6(%rip), %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl %r13d, %esi
movq %rbp, %rdi
call _ZNSolsEi@PLT
movq %rax, %rbp
movl $13, %edx
leaq .LC7(%rip), %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movslq 8(%rsp), %r14
salq $2, %r14
movq %r14, %rsi
movq %rbp, %rdi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rbp
movl $7, %edx
movq %r12, %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq 0(%rbp), %rax
movq -24(%rax), %rax
movq 240(%rbp,%rax), %r12
testq %r12, %r12
je .L30
cmpb $0, 56(%r12)
je .L20
movzbl 67(%r12), %esi
.L21:
movsbl %sil, %esi
movq %rbp, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
leaq 40(%rsp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
movl %eax, %edi
call _Z13checkGPUError9cudaError
movl $1, %ecx
movq %r14, %rdx
movq (%rsp), %rsi
movq 40(%rsp), %rdi
call cudaMemcpy@PLT
leal (%r15,%rbx), %eax
movl %eax, %ebx
movl %eax, 16(%rsp)
movl $2, %edi
call cudaDeviceSetCacheConfig@PLT
cmpl $1, %ebx
jle .L22
leal 3(%r15), %eax
cltq
salq $2, %rax
movq %rax, 24(%rsp)
leal -1(%r15,%r15), %r12d
movl $1, %eax
subl %r15d, %eax
imull %r13d, %eax
leal -1(%r15,%rax), %r14d
movl $1, %ebx
leal -1(%r15), %eax
movl %eax, 20(%rsp)
jmp .L26
.L30:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L31
call _ZSt16__throw_bad_castv@PLT
.L31:
call __stack_chk_fail@PLT
.L20:
movq %r12, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%r12), %rax
movl $10, %esi
movq %r12, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L21
.L23:
movl 20(%rsp), %eax
movl %eax, (%rsp)
movl %r15d, %eax
subl %r12d, %eax
movl %eax, 8(%rsp)
movl %r14d, 12(%rsp)
movl %r12d, %ebp
.L24:
leal 62(%rbp), %eax
movl %ebp, %edx
addl $31, %edx
cmovns %edx, %eax
movl $128, 76(%rsp)
movl $1, 80(%rsp)
andl $-32, %eax
leal 254(%rax), %edx
addl $127, %eax
cmovs %edx, %eax
sarl $7, %eax
movl %eax, 64(%rsp)
movl $1, 68(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 76(%rsp), %rdx
movl $1, %ecx
movq 64(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L32
.L25:
call cudaDeviceSynchronize@PLT
addl $1, %ebx
subl $1, %r12d
addl %r13d, %r14d
cmpl %ebx, 16(%rsp)
je .L22
.L26:
cmpl %ebx, %r15d
jl .L23
leal -1(%rbx), %eax
movl %eax, (%rsp)
movl %eax, 12(%rsp)
movl %ebx, %ebp
movl $0, 8(%rsp)
jmp .L24
.L32:
movq 24(%rsp), %rdi
addq 40(%rsp), %rdi
movl 8(%rsp), %r9d
movl (%rsp), %r8d
movl %r13d, %ecx
movl %ebp, %edx
movl 12(%rsp), %esi
call _Z27__device_stub__Z3GPUPiiiiiiPiiiiii
jmp .L25
.L22:
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L33
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L33:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3801:
.size _Z3SORiiPi, .-_Z3SORiiPi
.section .rodata.str1.1
.LC8:
.string "_Z3GPUPiiiiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3829:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z3GPUPiiiiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3829:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "GPU.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z18__device_stub__GPUPiiiiii # -- Begin function _Z18__device_stub__GPUPiiiiii
.p2align 4, 0x90
.type _Z18__device_stub__GPUPiiiiii,@function
_Z18__device_stub__GPUPiiiiii: # @_Z18__device_stub__GPUPiiiiii
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 72(%rsp)
movl %esi, 20(%rsp)
movl %edx, 16(%rsp)
movl %ecx, 12(%rsp)
movl %r8d, 8(%rsp)
movl %r9d, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 20(%rsp), %rax
movq %rax, 88(%rsp)
leaq 16(%rsp), %rax
movq %rax, 96(%rsp)
leaq 12(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3GPUPiiiiii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end0:
.size _Z18__device_stub__GPUPiiiiii, .Lfunc_end0-_Z18__device_stub__GPUPiiiiii
.cfi_endproc
# -- End function
.globl _Z13checkGPUError10hipError_t # -- Begin function _Z13checkGPUError10hipError_t
.p2align 4, 0x90
.type _Z13checkGPUError10hipError_t,@function
_Z13checkGPUError10hipError_t: # @_Z13checkGPUError10hipError_t
.cfi_startproc
# %bb.0:
testl %edi, %edi
jne .LBB1_2
# %bb.1:
retq
.LBB1_2:
pushq %rax
.cfi_def_cfa_offset 16
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $25, %edx
movq %rax, %rcx
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.Lfunc_end1:
.size _Z13checkGPUError10hipError_t, .Lfunc_end1-_Z13checkGPUError10hipError_t
.cfi_endproc
# -- End function
.globl _Z3SORiiPi # -- Begin function _Z3SORiiPi
.p2align 4, 0x90
.type _Z3SORiiPi,@function
_Z3SORiiPi: # @_Z3SORiiPi
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $216, %rsp
.cfi_def_cfa_offset 272
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rdx, %r14
# kill: def $esi killed $esi def $rsi
# kill: def $edi killed $edi def $rdi
movq %rdi, 8(%rsp) # 8-byte Spill
leal 2(%rdi), %ebp
movq %rsi, %rbx
leal 2(%rsi), %r12d
leaq 96(%rsp), %rdi
leaq 88(%rsp), %rsi
callq hipMemGetInfo
movl %r12d, %r13d
imull %ebp, %r13d
movl $_ZSt4cout, %edi
movl $.L.str.2, %esi
movl $30, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq 96(%rsp), %rsi
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertImEERSoT_
movq %rax, %r15
movl $.L.str.3, %esi
movl $15, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq 88(%rsp), %rsi
movq %r15, %rdi
callq _ZNSo9_M_insertImEERSoT_
movl $.L.str.4, %esi
movl $7, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl $.L.str.5, %esi
movl $9, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl %r12d, %esi
callq _ZNSolsEi
movq %rax, %r15
movl $.L.str.6, %esi
movl $11, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %r15, %rdi
movl %ebp, 4(%rsp) # 4-byte Spill
movl %ebp, %esi
callq _ZNSolsEi
movq %rax, %r15
movl $.L.str.7, %esi
movl $13, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movslq %r13d, %r12
shlq $2, %r12
movq %r15, %rdi
movq %r12, %rsi
callq _ZNSo9_M_insertImEERSoT_
movq %rax, %r13
movl $.L.str.4, %esi
movl $7, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq (%r13), %rax
movq -24(%rax), %rax
movq 240(%r13,%rax), %r15
testq %r15, %r15
je .LBB2_14
# %bb.1: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%r15)
je .LBB2_3
# %bb.2:
movzbl 67(%r15), %eax
jmp .LBB2_4
.LBB2_3:
movq %r15, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r15), %rax
movq %r15, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_4: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movq %r13, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
leaq 16(%rsp), %rdi
movq %r12, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB2_15
# %bb.5: # %_Z13checkGPUError10hipError_t.exit
movq 16(%rsp), %rdi
movl $1, %ebp
movq %r14, %rsi
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
addl 8(%rsp), %ebx # 4-byte Folded Reload
movl $2, %edi
callq hipDeviceSetCacheConfig
movq %rbx, 80(%rsp) # 8-byte Spill
cmpl $2, %ebx
jl .LBB2_13
# %bb.6: # %.lr.ph
movabsq $4294967296, %rdx # imm = 0x100000000
movq 8(%rsp), %rcx # 8-byte Reload
leal -1(%rcx), %eax
movl %eax, 32(%rsp) # 4-byte Spill
leal (%rcx,%rcx), %eax
movl %eax, 28(%rsp) # 4-byte Spill
movslq %ecx, %rax
movq %rax, 56(%rsp) # 8-byte Spill
movl %ecx, %eax
negl %eax
movq %rax, 64(%rsp) # 8-byte Spill
movl $1, %eax
subl %ecx, %eax
imull 4(%rsp), %eax # 4-byte Folded Reload
leal (%rcx,%rax), %r12d
decl %r12d
leaq 128(%rdx), %rax
movq %rax, 72(%rsp) # 8-byte Spill
jmp .LBB2_7
.p2align 4, 0x90
.LBB2_12: # in Loop: Header=BB2_7 Depth=1
callq hipDeviceSynchronize
incl %ebp
addl 4(%rsp), %r12d # 4-byte Folded Reload
cmpl %ebp, 80(%rsp) # 4-byte Folded Reload
je .LBB2_13
.LBB2_7: # =>This Inner Loop Header: Depth=1
cmpl 8(%rsp), %ebp # 4-byte Folded Reload
jle .LBB2_8
# %bb.9: # in Loop: Header=BB2_7 Depth=1
movq 64(%rsp), %rax # 8-byte Reload
leal (%rax,%rbp), %ebx
movl 28(%rsp), %eax # 4-byte Reload
movl %eax, %r13d
subl %ebp, %r13d
movl %r12d, %r14d
movl 32(%rsp), %r15d # 4-byte Reload
jmp .LBB2_10
.p2align 4, 0x90
.LBB2_8: # in Loop: Header=BB2_7 Depth=1
leal -1(%rbp), %r14d
xorl %ebx, %ebx
movl %r14d, %r15d
movl %ebp, %r13d
.LBB2_10: # in Loop: Header=BB2_7 Depth=1
leal 31(%r13), %eax
leal 62(%r13), %edi
testl %eax, %eax
cmovnsl %eax, %edi
andl $-32, %edi
leal 127(%rdi), %eax
addl $254, %edi
testl %eax, %eax
cmovnsl %eax, %edi
sarl $7, %edi
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %rdi
movl $1, %esi
movq 72(%rsp), %rdx # 8-byte Reload
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_12
# %bb.11: # in Loop: Header=BB2_7 Depth=1
movq 16(%rsp), %rax
movq 56(%rsp), %rcx # 8-byte Reload
leaq (%rax,%rcx,4), %rax
addq $12, %rax
movq %rax, 152(%rsp)
movl %r14d, 52(%rsp)
movl %r13d, 48(%rsp)
movl 4(%rsp), %eax # 4-byte Reload
movl %eax, 44(%rsp)
movl %r15d, 40(%rsp)
movl %ebx, 36(%rsp)
leaq 152(%rsp), %rax
movq %rax, 160(%rsp)
leaq 52(%rsp), %rax
movq %rax, 168(%rsp)
leaq 48(%rsp), %rax
movq %rax, 176(%rsp)
leaq 44(%rsp), %rax
movq %rax, 184(%rsp)
leaq 40(%rsp), %rax
movq %rax, 192(%rsp)
leaq 36(%rsp), %rax
movq %rax, 200(%rsp)
leaq 136(%rsp), %rdi
leaq 120(%rsp), %rsi
leaq 112(%rsp), %rdx
leaq 104(%rsp), %rcx
callq __hipPopCallConfiguration
movq 136(%rsp), %rsi
movl 144(%rsp), %edx
movq 120(%rsp), %rcx
movl 128(%rsp), %r8d
movl $_Z3GPUPiiiiii, %edi
leaq 160(%rsp), %r9
pushq 104(%rsp)
.cfi_adjust_cfa_offset 8
pushq 120(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB2_12
.LBB2_13: # %._crit_edge
movq 16(%rsp), %rdi
callq hipFree
addq $216, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB2_14:
.cfi_def_cfa_offset 272
callq _ZSt16__throw_bad_castv
.LBB2_15:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $25, %edx
movq %rax, %rcx
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.Lfunc_end2:
.size _Z3SORiiPi, .Lfunc_end2-_Z3SORiiPi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3GPUPiiiiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3GPUPiiiiii,@object # @_Z3GPUPiiiiii
.section .rodata,"a",@progbits
.globl _Z3GPUPiiiiii
.p2align 3, 0x0
_Z3GPUPiiiiii:
.quad _Z18__device_stub__GPUPiiiiii
.size _Z3GPUPiiiiii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "CUDA error in file %s, in line %i: %s\n"
.size .L.str, 39
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/zzzhe1990/GPUMemEfficient/master/2D-HEAT/GPU.hip"
.size .L.str.1, 106
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "current GPU memory info FREE: "
.size .L.str.2, 31
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz " Bytes, Total: "
.size .L.str.3, 16
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz " Bytes."
.size .L.str.4, 8
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "colsize: "
.size .L.str.5, 10
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz ", rowsize: "
.size .L.str.6, 12
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz ", allocates: "
.size .L.str.7, 14
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3GPUPiiiiii"
.size .L__unnamed_1, 14
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__GPUPiiiiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3GPUPiiiiii
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include<stdio.h>
#include<stdlib.h>
#define TPB 8
#define N 32
__device__ float distance(float x1, float x2)
{
return sqrt ((x2-x1)*(x2-x1));
}
__global__ void distanceKernel(float *d_out, float *d_in, float ref)
{
const int i=blockIdx.x*blockDim.x+threadIdx.x;
const int j=blockIdx.x;
const int k=threadIdx.x;
const int l=blockDim.x;
const float x=d_in[i];
d_out[i]=distance(x, ref);
printf("blockDIM=%d, blockID=%d, threadID=%d, i=%d: the distance between %f to %f is %f. \n", l, j, k, i, ref, x, d_out[i]); ////
}
void distanceArray(float *out, float *in, float ref, int len)
{
float *d_in=0;
float *d_out=0; ////
cudaMalloc(&d_in, len*sizeof(float));
cudaMalloc(&d_out, len*sizeof(float));
cudaMemcpy(d_in, in, len*sizeof(float), cudaMemcpyHostToDevice);
distanceKernel<<<len/TPB, TPB>>>(d_out, d_in, ref);
cudaMemcpy(out, d_out, len*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_in);
cudaFree(d_out);
}
float scale(int i, int n)
{
return ((float) i)/(n-1);
}
int main()
{
const float ref=0.5f;
float *in=(float*) calloc(N,sizeof(float));
float *out=(float*) calloc(N, sizeof(float));
for(int i=0; i<N; ++i)
{
in[i]=scale(i,N); //
}
distanceArray(out, in, ref, N);
printf("______________________________ \n");
for(int j=0; j<N; ++j)
{
printf("The distance, printed from the host, between %f to %f is %f. \n", ref, in[j], out[j]);
}
free(in);
free(out);
return 0;
} | code for sm_80
Function : _Z14distanceKernelPfS_f
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R8, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff087435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0040*/ IADD3 R1, R1, -0x28, RZ ; /* 0xffffffd801017810 */
/* 0x000fe20007ffe0ff */
/*0050*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */
/* 0x000e240000002100 */
/*0060*/ IMAD R5, R3, c[0x0][0x0], R4 ; /* 0x0000000003057a24 */
/* 0x001fca00078e0204 */
/*0070*/ IMAD.WIDE R8, R5, R8, c[0x0][0x168] ; /* 0x00005a0005087625 */
/* 0x000fca00078e0208 */
/*0080*/ LDG.E R12, [R8.64] ; /* 0x00000004080c7981 */
/* 0x000ea2000c1e1900 */
/*0090*/ IADD3 R6, P1, R1, c[0x0][0x20], RZ ; /* 0x0000080001067a10 */
/* 0x000fe20007f3e0ff */
/*00a0*/ BSSY B0, 0x1d0 ; /* 0x0000012000007945 */
/* 0x000fe80003800000 */
/*00b0*/ IMAD.X R7, RZ, RZ, c[0x0][0x24], P1 ; /* 0x00000900ff077624 */
/* 0x000fe400008e06ff */
/*00c0*/ FADD R0, -R12, c[0x0][0x170] ; /* 0x00005c000c007621 */
/* 0x004fc80000000100 */
/*00d0*/ FMUL R0, R0, R0 ; /* 0x0000000000007220 */
/* 0x000fc80000400000 */
/*00e0*/ MUFU.RSQ R11, R0 ; /* 0x00000000000b7308 */
/* 0x0000620000001400 */
/*00f0*/ IADD3 R2, R0, -0xd000000, RZ ; /* 0xf300000000027810 */
/* 0x000fc80007ffe0ff */
/*0100*/ ISETP.GT.U32.AND P0, PT, R2, 0x727fffff, PT ; /* 0x727fffff0200780c */
/* 0x000fda0003f04070 */
/*0110*/ @!P0 BRA 0x180 ; /* 0x0000006000008947 */
/* 0x000fea0003800000 */
/*0120*/ BSSY B1, 0x160 ; /* 0x0000003000017945 */
/* 0x003fe20003800000 */
/*0130*/ MOV R13, 0x150 ; /* 0x00000150000d7802 */
/* 0x000fe40000000f00 */
/*0140*/ CALL.REL.NOINC 0x360 ; /* 0x0000021000007944 */
/* 0x000fea0003c00000 */
/*0150*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0160*/ MOV R0, R2 ; /* 0x0000000200007202 */
/* 0x000fe20000000f00 */
/*0170*/ BRA 0x1c0 ; /* 0x0000004000007947 */
/* 0x000fea0003800000 */
/*0180*/ FMUL.FTZ R9, R0, R11 ; /* 0x0000000b00097220 */
/* 0x003fe40000410000 */
/*0190*/ FMUL.FTZ R11, R11, 0.5 ; /* 0x3f0000000b0b7820 */
/* 0x000fe40000410000 */
/*01a0*/ FFMA R0, -R9, R9, R0 ; /* 0x0000000909007223 */
/* 0x000fc80000000100 */
/*01b0*/ FFMA R0, R0, R11, R9 ; /* 0x0000000b00007223 */
/* 0x000fe40000000009 */
/*01c0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*01d0*/ F2F.F64.F32 R12, R12 ; /* 0x0000000c000c7310 */
/* 0x000e220000201800 */
/*01e0*/ HFMA2.MMA R8, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff087435 */
/* 0x000fe200000001ff */
/*01f0*/ MOV R16, 0x0 ; /* 0x0000000000107802 */
/* 0x000fe20000000f00 */
/*0200*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff027624 */
/* 0x000fe200078e00ff */
/*0210*/ STL.64 [R1+0x8], R4 ; /* 0x0000080401007387 */
/* 0x0003e80000100a00 */
/*0220*/ F2F.F64.F32 R14, R0 ; /* 0x00000000000e7310 */
/* 0x000ea20000201800 */
/*0230*/ STL.64 [R1], R2 ; /* 0x0000000201007387 */
/* 0x0007e20000100a00 */
/*0240*/ LDC.64 R16, c[0x4][R16] ; /* 0x0100000010107b82 */
/* 0x000f220000000a00 */
/*0250*/ IMAD.WIDE R8, R5, R8, c[0x0][0x160] ; /* 0x0000580005087625 */
/* 0x000fca00078e0208 */
/*0260*/ F2F.F64.F32 R10, c[0x0][0x170] ; /* 0x00005c00000a7b10 */
/* 0x000f620000201800 */
/*0270*/ STL.64 [R1+0x18], R12 ; /* 0x0000180c01007387 */
/* 0x0017e20000100a00 */
/*0280*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */
/* 0x002fe200078e00ff */
/*0290*/ MOV R5, c[0x4][0xc] ; /* 0x0100030000057a02 */
/* 0x000fe40000000f00 */
/*02a0*/ STG.E [R8.64], R0 ; /* 0x0000000008007986 */
/* 0x0007e8000c101904 */
/*02b0*/ STL.64 [R1+0x20], R14 ; /* 0x0000200e01007387 */
/* 0x0047e80000100a00 */
/*02c0*/ STL.64 [R1+0x10], R10 ; /* 0x0000100a01007387 */
/* 0x0207e40000100a00 */
/*02d0*/ LEPC R2 ; /* 0x000000000002734e */
/* 0x008fe20000000000 */
/*02e0*/ MOV R9, 0x350 ; /* 0x0000035000097802 */
/* 0x000fc40000000f00 */
/*02f0*/ MOV R20, 0x2d0 ; /* 0x000002d000147802 */
/* 0x000fe40000000f00 */
/*0300*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*0310*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe40000000f00 */
/*0320*/ IADD3 R20, P0, P1, -R20, R9, R2 ; /* 0x0000000914147210 */
/* 0x000fc8000791e102 */
/*0330*/ IADD3.X R21, ~R0, R21, R3, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e2503 */
/*0340*/ CALL.ABS.NOINC R16 ; /* 0x0000000010007343 */
/* 0x010fea0003c00000 */
/*0350*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0360*/ LOP3.LUT P0, RZ, R0, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff00ff7812 */
/* 0x000fda000780c0ff */
/*0370*/ @!P0 IMAD.MOV.U32 R2, RZ, RZ, R0 ; /* 0x000000ffff028224 */
/* 0x000fe200078e0000 */
/*0380*/ @!P0 BRA 0x490 ; /* 0x0000010000008947 */
/* 0x000fea0003800000 */
/*0390*/ FSETP.GEU.FTZ.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720b */
/* 0x000fda0003f1e000 */
/*03a0*/ @!P0 MOV R2, 0x7fffffff ; /* 0x7fffffff00028802 */
/* 0x000fe20000000f00 */
/*03b0*/ @!P0 BRA 0x490 ; /* 0x000000d000008947 */
/* 0x000fea0003800000 */
/*03c0*/ FSETP.GTU.FTZ.AND P0, PT, |R0|, +INF , PT ; /* 0x7f8000000000780b */
/* 0x000fda0003f1c200 */
/*03d0*/ @P0 FADD.FTZ R2, R0, 1 ; /* 0x3f80000000020421 */
/* 0x000fe20000010000 */
/*03e0*/ @P0 BRA 0x490 ; /* 0x000000a000000947 */
/* 0x000fea0003800000 */
/*03f0*/ FSETP.NEU.FTZ.AND P0, PT, |R0|, +INF , PT ; /* 0x7f8000000000780b */
/* 0x000fda0003f1d200 */
/*0400*/ @P0 FFMA R8, R0, 1.84467440737095516160e+19, RZ ; /* 0x5f80000000080823 */
/* 0x000fc800000000ff */
/*0410*/ @P0 MUFU.RSQ R9, R8 ; /* 0x0000000800090308 */
/* 0x000e240000001400 */
/*0420*/ @P0 FMUL.FTZ R11, R8, R9 ; /* 0x00000009080b0220 */
/* 0x001fe40000410000 */
/*0430*/ @P0 FMUL.FTZ R9, R9, 0.5 ; /* 0x3f00000009090820 */
/* 0x000fe40000410000 */
/*0440*/ @P0 FADD.FTZ R2, -R11, -RZ ; /* 0x800000ff0b020221 */
/* 0x000fc80000010100 */
/*0450*/ @P0 FFMA R10, R11, R2, R8 ; /* 0x000000020b0a0223 */
/* 0x000fe40000000008 */
/*0460*/ @!P0 IMAD.MOV.U32 R2, RZ, RZ, R0 ; /* 0x000000ffff028224 */
/* 0x000fe400078e0000 */
/*0470*/ @P0 FFMA R9, R10, R9, R11 ; /* 0x000000090a090223 */
/* 0x000fc8000000000b */
/*0480*/ @P0 FMUL.FTZ R2, R9, 2.3283064365386962891e-10 ; /* 0x2f80000009020820 */
/* 0x000fe40000410000 */
/*0490*/ MOV R8, R13 ; /* 0x0000000d00087202 */
/* 0x000fe20000000f00 */
/*04a0*/ IMAD.MOV.U32 R9, RZ, RZ, 0x0 ; /* 0x00000000ff097424 */
/* 0x000fc800078e00ff */
/*04b0*/ RET.REL.NODEC R8 0x0 ; /* 0xfffffb4008007950 */
/* 0x000fea0003c3ffff */
/*04c0*/ BRA 0x4c0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*04d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0500*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0510*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0520*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0530*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0540*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0550*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0560*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include<stdio.h>
#include<stdlib.h>
#define TPB 8
#define N 32
__device__ float distance(float x1, float x2)
{
return sqrt ((x2-x1)*(x2-x1));
}
__global__ void distanceKernel(float *d_out, float *d_in, float ref)
{
const int i=blockIdx.x*blockDim.x+threadIdx.x;
const int j=blockIdx.x;
const int k=threadIdx.x;
const int l=blockDim.x;
const float x=d_in[i];
d_out[i]=distance(x, ref);
printf("blockDIM=%d, blockID=%d, threadID=%d, i=%d: the distance between %f to %f is %f. \n", l, j, k, i, ref, x, d_out[i]); ////
}
void distanceArray(float *out, float *in, float ref, int len)
{
float *d_in=0;
float *d_out=0; ////
cudaMalloc(&d_in, len*sizeof(float));
cudaMalloc(&d_out, len*sizeof(float));
cudaMemcpy(d_in, in, len*sizeof(float), cudaMemcpyHostToDevice);
distanceKernel<<<len/TPB, TPB>>>(d_out, d_in, ref);
cudaMemcpy(out, d_out, len*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_in);
cudaFree(d_out);
}
float scale(int i, int n)
{
return ((float) i)/(n-1);
}
int main()
{
const float ref=0.5f;
float *in=(float*) calloc(N,sizeof(float));
float *out=(float*) calloc(N, sizeof(float));
for(int i=0; i<N; ++i)
{
in[i]=scale(i,N); //
}
distanceArray(out, in, ref, N);
printf("______________________________ \n");
for(int j=0; j<N; ++j)
{
printf("The distance, printed from the host, between %f to %f is %f. \n", ref, in[j], out[j]);
}
free(in);
free(out);
return 0;
} | .file "tmpxft_00000970_00000000-6_cuda4.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2063:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z8distanceff
.type _Z8distanceff, @function
_Z8distanceff:
.LFB2057:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2057:
.size _Z8distanceff, .-_Z8distanceff
.globl _Z5scaleii
.type _Z5scaleii, @function
_Z5scaleii:
.LFB2059:
.cfi_startproc
endbr64
pxor %xmm0, %xmm0
cvtsi2ssl %edi, %xmm0
subl $1, %esi
pxor %xmm1, %xmm1
cvtsi2ssl %esi, %xmm1
divss %xmm1, %xmm0
ret
.cfi_endproc
.LFE2059:
.size _Z5scaleii, .-_Z5scaleii
.globl _Z37__device_stub__Z14distanceKernelPfS_fPfS_f
.type _Z37__device_stub__Z14distanceKernelPfS_fPfS_f, @function
_Z37__device_stub__Z14distanceKernelPfS_fPfS_f:
.LFB2085:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movss %xmm0, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L10
.L6:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L11
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L10:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z14distanceKernelPfS_f(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L6
.L11:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z37__device_stub__Z14distanceKernelPfS_fPfS_f, .-_Z37__device_stub__Z14distanceKernelPfS_fPfS_f
.globl _Z14distanceKernelPfS_f
.type _Z14distanceKernelPfS_f, @function
_Z14distanceKernelPfS_f:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z14distanceKernelPfS_fPfS_f
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z14distanceKernelPfS_f, .-_Z14distanceKernelPfS_f
.globl _Z13distanceArrayPfS_fi
.type _Z13distanceArrayPfS_fi, @function
_Z13distanceArrayPfS_fi:
.LFB2058:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $72, %rsp
.cfi_def_cfa_offset 112
movq %rdi, %r12
movq %rsi, %r13
movss %xmm0, 12(%rsp)
movl %edx, %ebp
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movq $0, 16(%rsp)
movq $0, 24(%rsp)
movslq %edx, %rbx
salq $2, %rbx
leaq 16(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r13, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $8, 44(%rsp)
movl $1, 48(%rsp)
leal 7(%rbp), %eax
testl %ebp, %ebp
cmovns %ebp, %eax
sarl $3, %eax
movl %eax, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L18
.L15:
movl $2, %ecx
movq %rbx, %rdx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L19
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L18:
.cfi_restore_state
movss 12(%rsp), %xmm0
movq 16(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z37__device_stub__Z14distanceKernelPfS_fPfS_f
jmp .L15
.L19:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size _Z13distanceArrayPfS_fi, .-_Z13distanceArrayPfS_fi
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "______________________________ \n"
.align 8
.LC4:
.string "The distance, printed from the host, between %f to %f is %f. \n"
.text
.globl main
.type main, @function
main:
.LFB2060:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $8, %rsp
.cfi_def_cfa_offset 48
movl $4, %esi
movl $32, %edi
call calloc@PLT
movq %rax, %rbp
movl $4, %esi
movl $32, %edi
call calloc@PLT
movq %rax, %r12
movl $0, %eax
movss .LC0(%rip), %xmm1
.L21:
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
divss %xmm1, %xmm0
movss %xmm0, 0(%rbp,%rax,4)
addq $1, %rax
cmpq $32, %rax
jne .L21
movl $32, %edx
movss .LC1(%rip), %xmm0
movq %rbp, %rsi
movq %r12, %rdi
call _Z13distanceArrayPfS_fi
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %ebx
leaq .LC4(%rip), %r13
.L22:
pxor %xmm2, %xmm2
cvtss2sd (%r12,%rbx), %xmm2
pxor %xmm1, %xmm1
cvtss2sd 0(%rbp,%rbx), %xmm1
movsd .LC3(%rip), %xmm0
movq %r13, %rsi
movl $2, %edi
movl $3, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq $128, %rbx
jne .L22
movq %rbp, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movl $0, %eax
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC5:
.string "_Z14distanceKernelPfS_f"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z14distanceKernelPfS_f(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1106771968
.align 4
.LC1:
.long 1056964608
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC3:
.long 0
.long 1071644672
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include<stdio.h>
#include<stdlib.h>
#define TPB 8
#define N 32
__device__ float distance(float x1, float x2)
{
return sqrt ((x2-x1)*(x2-x1));
}
__global__ void distanceKernel(float *d_out, float *d_in, float ref)
{
const int i=blockIdx.x*blockDim.x+threadIdx.x;
const int j=blockIdx.x;
const int k=threadIdx.x;
const int l=blockDim.x;
const float x=d_in[i];
d_out[i]=distance(x, ref);
printf("blockDIM=%d, blockID=%d, threadID=%d, i=%d: the distance between %f to %f is %f. \n", l, j, k, i, ref, x, d_out[i]); ////
}
void distanceArray(float *out, float *in, float ref, int len)
{
float *d_in=0;
float *d_out=0; ////
cudaMalloc(&d_in, len*sizeof(float));
cudaMalloc(&d_out, len*sizeof(float));
cudaMemcpy(d_in, in, len*sizeof(float), cudaMemcpyHostToDevice);
distanceKernel<<<len/TPB, TPB>>>(d_out, d_in, ref);
cudaMemcpy(out, d_out, len*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_in);
cudaFree(d_out);
}
float scale(int i, int n)
{
return ((float) i)/(n-1);
}
int main()
{
const float ref=0.5f;
float *in=(float*) calloc(N,sizeof(float));
float *out=(float*) calloc(N, sizeof(float));
for(int i=0; i<N; ++i)
{
in[i]=scale(i,N); //
}
distanceArray(out, in, ref, N);
printf("______________________________ \n");
for(int j=0; j<N; ++j)
{
printf("The distance, printed from the host, between %f to %f is %f. \n", ref, in[j], out[j]);
}
free(in);
free(out);
return 0;
} | #include <hip/hip_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#define TPB 8
#define N 32
__device__ float distance(float x1, float x2)
{
return sqrt ((x2-x1)*(x2-x1));
}
__global__ void distanceKernel(float *d_out, float *d_in, float ref)
{
const int i=blockIdx.x*blockDim.x+threadIdx.x;
const int j=blockIdx.x;
const int k=threadIdx.x;
const int l=blockDim.x;
const float x=d_in[i];
d_out[i]=distance(x, ref);
printf("blockDIM=%d, blockID=%d, threadID=%d, i=%d: the distance between %f to %f is %f. \n", l, j, k, i, ref, x, d_out[i]); ////
}
void distanceArray(float *out, float *in, float ref, int len)
{
float *d_in=0;
float *d_out=0; ////
hipMalloc(&d_in, len*sizeof(float));
hipMalloc(&d_out, len*sizeof(float));
hipMemcpy(d_in, in, len*sizeof(float), hipMemcpyHostToDevice);
distanceKernel<<<len/TPB, TPB>>>(d_out, d_in, ref);
hipMemcpy(out, d_out, len*sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_in);
hipFree(d_out);
}
float scale(int i, int n)
{
return ((float) i)/(n-1);
}
int main()
{
const float ref=0.5f;
float *in=(float*) calloc(N,sizeof(float));
float *out=(float*) calloc(N, sizeof(float));
for(int i=0; i<N; ++i)
{
in[i]=scale(i,N); //
}
distanceArray(out, in, ref, N);
printf("______________________________ \n");
for(int j=0; j<N; ++j)
{
printf("The distance, printed from the host, between %f to %f is %f. \n", ref, in[j], out[j]);
}
free(in);
free(out);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#define TPB 8
#define N 32
__device__ float distance(float x1, float x2)
{
return sqrt ((x2-x1)*(x2-x1));
}
__global__ void distanceKernel(float *d_out, float *d_in, float ref)
{
const int i=blockIdx.x*blockDim.x+threadIdx.x;
const int j=blockIdx.x;
const int k=threadIdx.x;
const int l=blockDim.x;
const float x=d_in[i];
d_out[i]=distance(x, ref);
printf("blockDIM=%d, blockID=%d, threadID=%d, i=%d: the distance between %f to %f is %f. \n", l, j, k, i, ref, x, d_out[i]); ////
}
void distanceArray(float *out, float *in, float ref, int len)
{
float *d_in=0;
float *d_out=0; ////
hipMalloc(&d_in, len*sizeof(float));
hipMalloc(&d_out, len*sizeof(float));
hipMemcpy(d_in, in, len*sizeof(float), hipMemcpyHostToDevice);
distanceKernel<<<len/TPB, TPB>>>(d_out, d_in, ref);
hipMemcpy(out, d_out, len*sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_in);
hipFree(d_out);
}
float scale(int i, int n)
{
return ((float) i)/(n-1);
}
// Host driver: builds N evenly spaced samples in [0, 1], computes each
// sample's distance from ref = 0.5 on the GPU, and prints every result.
int main()
{
const float ref=0.5f;
// calloc zero-initializes both host buffers (N floats each).
float *in=(float*) calloc(N,sizeof(float));
float *out=(float*) calloc(N, sizeof(float));
for(int i=0; i<N; ++i)
{
// in[i] = i / (N - 1): evenly spaced over [0, 1].
in[i]=scale(i,N); //
}
distanceArray(out, in, ref, N);
printf("______________________________ \n");
for(int j=0; j<N; ++j)
{
printf("The distance, printed from the host, between %f to %f is %f. \n", ref, in[j], out[j]);
}
free(in);
free(out);
return 0;
}
.file "cuda4.hip"
.globl _Z29__device_stub__distanceKernelPfS_f # -- Begin function _Z29__device_stub__distanceKernelPfS_f
.p2align 4, 0x90
.type _Z29__device_stub__distanceKernelPfS_f,@function
_Z29__device_stub__distanceKernelPfS_f: # @_Z29__device_stub__distanceKernelPfS_f
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movss %xmm0, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z14distanceKernelPfS_f, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z29__device_stub__distanceKernelPfS_f, .Lfunc_end0-_Z29__device_stub__distanceKernelPfS_f
.cfi_endproc
# -- End function
.globl _Z13distanceArrayPfS_fi # -- Begin function _Z13distanceArrayPfS_fi
.p2align 4, 0x90
.type _Z13distanceArrayPfS_fi,@function
_Z13distanceArrayPfS_fi: # @_Z13distanceArrayPfS_fi
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $120, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movss %xmm0, 24(%rsp) # 4-byte Spill
movq %rsi, %r15
movq %rdi, %rbx
movq $0, 16(%rsp)
movq $0, 8(%rsp)
movslq %edx, %r12
leaq (,%r12,4), %r14
leaq 16(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
leaq 8(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movq %r15, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leal 7(%r12), %edi
testl %r12d, %r12d
cmovnsl %r12d, %edi
sarl $3, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $8, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 8(%rsp), %rax
movq 16(%rsp), %rcx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movss 24(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm0, 28(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 28(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z14distanceKernelPfS_f, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq 8(%rsp), %rsi
movq %rbx, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
addq $120, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z13distanceArrayPfS_fi, .Lfunc_end1-_Z13distanceArrayPfS_fi
.cfi_endproc
# -- End function
.globl _Z5scaleii # -- Begin function _Z5scaleii
.p2align 4, 0x90
.type _Z5scaleii,@function
_Z5scaleii: # @_Z5scaleii
.cfi_startproc
# %bb.0:
cvtsi2ss %edi, %xmm0
decl %esi
cvtsi2ss %esi, %xmm1
divss %xmm1, %xmm0
retq
.Lfunc_end2:
.size _Z5scaleii, .Lfunc_end2-_Z5scaleii
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI3_0:
.long 0x41f80000 # float 31
.LCPI3_1:
.long 0x3f000000 # float 0.5
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI3_2:
.quad 0x3fe0000000000000 # double 0.5
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $32, %edi
movl $4, %esi
callq calloc
movq %rax, %rbx
movl $32, %edi
movl $4, %esi
callq calloc
movq %rax, %r14
xorl %eax, %eax
movss .LCPI3_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
.p2align 4, 0x90
.LBB3_1: # =>This Inner Loop Header: Depth=1
xorps %xmm1, %xmm1
cvtsi2ss %eax, %xmm1
divss %xmm0, %xmm1
movss %xmm1, (%rbx,%rax,4)
incq %rax
cmpq $32, %rax
jne .LBB3_1
# %bb.2:
movss .LCPI3_1(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
movq %r14, %rdi
movq %rbx, %rsi
movl $32, %edx
callq _Z13distanceArrayPfS_fi
movl $.Lstr, %edi
callq puts@PLT
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_3: # =>This Inner Loop Header: Depth=1
movss (%rbx,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
xorps %xmm1, %xmm1
cvtss2sd %xmm0, %xmm1
movss (%r14,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
xorps %xmm2, %xmm2
cvtss2sd %xmm0, %xmm2
movl $.L.str.1, %edi
movsd .LCPI3_2(%rip), %xmm0 # xmm0 = mem[0],zero
movb $3, %al
callq printf
incq %r15
cmpq $32, %r15
jne .LBB3_3
# %bb.4:
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
xorl %eax, %eax
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14distanceKernelPfS_f, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z14distanceKernelPfS_f,@object # @_Z14distanceKernelPfS_f
.section .rodata,"a",@progbits
.globl _Z14distanceKernelPfS_f
.p2align 3, 0x0
_Z14distanceKernelPfS_f:
.quad _Z29__device_stub__distanceKernelPfS_f
.size _Z14distanceKernelPfS_f, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "The distance, printed from the host, between %f to %f is %f. \n"
.size .L.str.1, 63
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z14distanceKernelPfS_f"
.size .L__unnamed_1, 24
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "______________________________ "
.size .Lstr, 32
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z29__device_stub__distanceKernelPfS_f
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z14distanceKernelPfS_f
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00000970_00000000-6_cuda4.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2063:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z8distanceff
.type _Z8distanceff, @function
_Z8distanceff:
.LFB2057:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2057:
.size _Z8distanceff, .-_Z8distanceff
.globl _Z5scaleii
.type _Z5scaleii, @function
_Z5scaleii:
.LFB2059:
.cfi_startproc
endbr64
pxor %xmm0, %xmm0
cvtsi2ssl %edi, %xmm0
subl $1, %esi
pxor %xmm1, %xmm1
cvtsi2ssl %esi, %xmm1
divss %xmm1, %xmm0
ret
.cfi_endproc
.LFE2059:
.size _Z5scaleii, .-_Z5scaleii
.globl _Z37__device_stub__Z14distanceKernelPfS_fPfS_f
.type _Z37__device_stub__Z14distanceKernelPfS_fPfS_f, @function
_Z37__device_stub__Z14distanceKernelPfS_fPfS_f:
.LFB2085:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movss %xmm0, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L10
.L6:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L11
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L10:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z14distanceKernelPfS_f(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L6
.L11:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z37__device_stub__Z14distanceKernelPfS_fPfS_f, .-_Z37__device_stub__Z14distanceKernelPfS_fPfS_f
.globl _Z14distanceKernelPfS_f
.type _Z14distanceKernelPfS_f, @function
_Z14distanceKernelPfS_f:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z14distanceKernelPfS_fPfS_f
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z14distanceKernelPfS_f, .-_Z14distanceKernelPfS_f
.globl _Z13distanceArrayPfS_fi
.type _Z13distanceArrayPfS_fi, @function
_Z13distanceArrayPfS_fi:
.LFB2058:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $72, %rsp
.cfi_def_cfa_offset 112
movq %rdi, %r12
movq %rsi, %r13
movss %xmm0, 12(%rsp)
movl %edx, %ebp
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movq $0, 16(%rsp)
movq $0, 24(%rsp)
movslq %edx, %rbx
salq $2, %rbx
leaq 16(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r13, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $8, 44(%rsp)
movl $1, 48(%rsp)
leal 7(%rbp), %eax
testl %ebp, %ebp
cmovns %ebp, %eax
sarl $3, %eax
movl %eax, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L18
.L15:
movl $2, %ecx
movq %rbx, %rdx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L19
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L18:
.cfi_restore_state
movss 12(%rsp), %xmm0
movq 16(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z37__device_stub__Z14distanceKernelPfS_fPfS_f
jmp .L15
.L19:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size _Z13distanceArrayPfS_fi, .-_Z13distanceArrayPfS_fi
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "______________________________ \n"
.align 8
.LC4:
.string "The distance, printed from the host, between %f to %f is %f. \n"
.text
.globl main
.type main, @function
main:
.LFB2060:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $8, %rsp
.cfi_def_cfa_offset 48
movl $4, %esi
movl $32, %edi
call calloc@PLT
movq %rax, %rbp
movl $4, %esi
movl $32, %edi
call calloc@PLT
movq %rax, %r12
movl $0, %eax
movss .LC0(%rip), %xmm1
.L21:
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
divss %xmm1, %xmm0
movss %xmm0, 0(%rbp,%rax,4)
addq $1, %rax
cmpq $32, %rax
jne .L21
movl $32, %edx
movss .LC1(%rip), %xmm0
movq %rbp, %rsi
movq %r12, %rdi
call _Z13distanceArrayPfS_fi
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %ebx
leaq .LC4(%rip), %r13
.L22:
pxor %xmm2, %xmm2
cvtss2sd (%r12,%rbx), %xmm2
pxor %xmm1, %xmm1
cvtss2sd 0(%rbp,%rbx), %xmm1
movsd .LC3(%rip), %xmm0
movq %r13, %rsi
movl $2, %edi
movl $3, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq $128, %rbx
jne .L22
movq %rbp, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movl $0, %eax
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC5:
.string "_Z14distanceKernelPfS_f"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z14distanceKernelPfS_f(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1106771968
.align 4
.LC1:
.long 1056964608
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC3:
.long 0
.long 1071644672
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cuda4.hip"
.globl _Z29__device_stub__distanceKernelPfS_f # -- Begin function _Z29__device_stub__distanceKernelPfS_f
.p2align 4, 0x90
.type _Z29__device_stub__distanceKernelPfS_f,@function
_Z29__device_stub__distanceKernelPfS_f: # @_Z29__device_stub__distanceKernelPfS_f
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movss %xmm0, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z14distanceKernelPfS_f, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z29__device_stub__distanceKernelPfS_f, .Lfunc_end0-_Z29__device_stub__distanceKernelPfS_f
.cfi_endproc
# -- End function
.globl _Z13distanceArrayPfS_fi # -- Begin function _Z13distanceArrayPfS_fi
.p2align 4, 0x90
.type _Z13distanceArrayPfS_fi,@function
_Z13distanceArrayPfS_fi: # @_Z13distanceArrayPfS_fi
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $120, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movss %xmm0, 24(%rsp) # 4-byte Spill
movq %rsi, %r15
movq %rdi, %rbx
movq $0, 16(%rsp)
movq $0, 8(%rsp)
movslq %edx, %r12
leaq (,%r12,4), %r14
leaq 16(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
leaq 8(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movq %r15, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leal 7(%r12), %edi
testl %r12d, %r12d
cmovnsl %r12d, %edi
sarl $3, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $8, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 8(%rsp), %rax
movq 16(%rsp), %rcx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movss 24(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm0, 28(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 28(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z14distanceKernelPfS_f, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq 8(%rsp), %rsi
movq %rbx, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
addq $120, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z13distanceArrayPfS_fi, .Lfunc_end1-_Z13distanceArrayPfS_fi
.cfi_endproc
# -- End function
.globl _Z5scaleii # -- Begin function _Z5scaleii
.p2align 4, 0x90
.type _Z5scaleii,@function
_Z5scaleii: # @_Z5scaleii
.cfi_startproc
# %bb.0:
cvtsi2ss %edi, %xmm0
decl %esi
cvtsi2ss %esi, %xmm1
divss %xmm1, %xmm0
retq
.Lfunc_end2:
.size _Z5scaleii, .Lfunc_end2-_Z5scaleii
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI3_0:
.long 0x41f80000 # float 31
.LCPI3_1:
.long 0x3f000000 # float 0.5
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI3_2:
.quad 0x3fe0000000000000 # double 0.5
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $32, %edi
movl $4, %esi
callq calloc
movq %rax, %rbx
movl $32, %edi
movl $4, %esi
callq calloc
movq %rax, %r14
xorl %eax, %eax
movss .LCPI3_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
.p2align 4, 0x90
.LBB3_1: # =>This Inner Loop Header: Depth=1
xorps %xmm1, %xmm1
cvtsi2ss %eax, %xmm1
divss %xmm0, %xmm1
movss %xmm1, (%rbx,%rax,4)
incq %rax
cmpq $32, %rax
jne .LBB3_1
# %bb.2:
movss .LCPI3_1(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
movq %r14, %rdi
movq %rbx, %rsi
movl $32, %edx
callq _Z13distanceArrayPfS_fi
movl $.Lstr, %edi
callq puts@PLT
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_3: # =>This Inner Loop Header: Depth=1
movss (%rbx,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
xorps %xmm1, %xmm1
cvtss2sd %xmm0, %xmm1
movss (%r14,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
xorps %xmm2, %xmm2
cvtss2sd %xmm0, %xmm2
movl $.L.str.1, %edi
movsd .LCPI3_2(%rip), %xmm0 # xmm0 = mem[0],zero
movb $3, %al
callq printf
incq %r15
cmpq $32, %r15
jne .LBB3_3
# %bb.4:
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
xorl %eax, %eax
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14distanceKernelPfS_f, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z14distanceKernelPfS_f,@object # @_Z14distanceKernelPfS_f
.section .rodata,"a",@progbits
.globl _Z14distanceKernelPfS_f
.p2align 3, 0x0
_Z14distanceKernelPfS_f:
.quad _Z29__device_stub__distanceKernelPfS_f
.size _Z14distanceKernelPfS_f, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "The distance, printed from the host, between %f to %f is %f. \n"
.size .L.str.1, 63
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z14distanceKernelPfS_f"
.size .L__unnamed_1, 24
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "______________________________ "
.size .Lstr, 32
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z29__device_stub__distanceKernelPfS_f
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z14distanceKernelPfS_f
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | //
// Created by lifan on 2021/5/17.
// | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | //
// Created by lifan on 2021/5/17.
// | .file "tmpxft_001a4f42_00000000-6_convolution_cuda.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | //
// Created by lifan on 2021/5/17.
// | #include <hip/hip_runtime.h>
//
// Created by lifan on 2021/5/17.
// |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
//
// Created by lifan on 2021/5/17.
// | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
//
// Created by lifan on 2021/5/17.
// | .text
.file "convolution_cuda.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001a4f42_00000000-6_convolution_cuda.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "convolution_cuda.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | //
// main.c
// qr
//
// Created by Zia Ul-Huda on 21/11/2016.
// Copyright © 2016 TU Darmstadt. All rights reserved.
//
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include <sys/time.h>
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
void showGPUMem();
// Backing function for the CudaSafeCall() macro: if `err` is not
// cudaSuccess, prints the failing file/line and the CUDA error string to
// stderr, then terminates the process. Compiled to a no-op when
// CUDA_ERROR_CHECK is undefined.
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
// Backing function for the CudaCheckError() macro: polls (and clears) the
// last asynchronous CUDA error; on failure it reports the file/line and
// error string, dumps GPU memory state via showGPUMem(), and exits.
// Compiled to a no-op when CUDA_ERROR_CHECK is undefined. Note that
// cudaGetLastError only catches errors already surfaced — in-kernel faults
// may not appear until the next synchronizing call (see the deliberately
// disabled sync check below).
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
cudaError err = cudaGetLastError();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
showGPUMem();
exit( -1 );
}
// More careful checking. However, this will affect performance.
// Comment away if needed.
/* err = cudaDeviceSynchronize();
if( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}*/
#endif
return;
}
typedef struct {
int m, n;
double * v;
} mat_t, mat;
#define BLOCK_SIZE 16
#define MAX_INT 100
#define EPSILON 0.00000001
int numBlocks;
dim3 dimGrid, dimBlock;
int numBlocksSingle, numThreadsSingle;
//get current wall time
// Current wall-clock time in seconds (microsecond resolution).
// Exits the process if the clock cannot be read.
double get_wall_time(){
    struct timeval now;
    if (gettimeofday(&now, NULL) != 0) {
        exit(-1);   // no usable clock: abort, matching original behavior
    }
    return (double)now.tv_sec + (double)now.tv_usec * .000001;
}
//creates a new structure of mat type with m*n dimensions and
//returns its pointer
// Allocate an m x n matrix with all elements zero-initialized.
// Caller frees it with matrix_delete().
// Fix: calloc takes (count, element_size); the original call had the
// arguments swapped, which happened to allocate the right number of
// bytes but defeated calloc's count*size overflow check. The count is
// widened to size_t so m*n cannot overflow int arithmetic first.
mat* matrix_new(int m, int n)
{
    mat *x = (mat*)malloc(sizeof(mat_t));
    x->v = (double*)calloc((size_t)m * n, sizeof(double));
    x->m = m;
    x->n = n;
    return x;
}
/**
* Creates a new structure of type mat
* on the device and initializes it. It returns
* the pointer to the structure in *x
*/
/**
 * Creates a new mat structure on the device with an m x n element
 * array set to zero. The device pointer to the struct is returned in
 * *x; release it with cuda_matrix_delete(). Every CUDA call is
 * followed by CudaCheckError(), which aborts the process on failure.
 */
void cuda_matrix_new(int m, int n, mat** x)
{
double* d_arr;
mat temp;// host-side staging copy of the struct
temp.m = m;
temp.n = n;
//allocate mat struct on device
cudaMalloc((void**) x,sizeof(mat_t));
CudaCheckError();
//allocate array on device and set it to 0
cudaMalloc((void**) &d_arr, m*n*sizeof(double));
CudaCheckError();
cudaMemset(d_arr, 0, sizeof(double) * m * n);
CudaCheckError();
//store the device pointer in temp object so the struct copy below
//carries a valid device array pointer
temp.v = d_arr;
//copy the temp to device object
cudaMemcpy(*x, &temp, sizeof(mat_t), cudaMemcpyHostToDevice);
CudaCheckError();
}
//delete a matrix
void matrix_delete(mat *m)
{
free(m->v);
free(m);
}
/**
* Free the memory of the structure pointed to by
* m on the device. Make sure to also free the memory
* of the elements of the matrix.
*/
void cuda_matrix_delete(mat *m)
{
mat temp;
// Copy m to host
cudaMemcpy(&temp,m,sizeof(mat),cudaMemcpyDeviceToHost);
CudaCheckError();
// Free array in m
cudaFree(temp.v);
CudaCheckError();
// Free m
cudaFree(m);
CudaCheckError();
}
//calculate transpose of a matrix
// In-place transpose by swapping elements across the main diagonal.
// NOTE(review): the indexing mixes m->n and m->m as row strides
// (i*m->n+j vs j*m->m+i), so this is only correct when the matrix is
// square (m->m == m->n) — which holds for the Q matrix it is applied
// to in householder(). Confirm before reusing on non-square matrices.
void matrix_transpose(mat *m)
{
int i,j;
// walk the strict lower triangle; swap each element with its mirror
for (i = 0; i < m->m; i++) {
for (j = 0; j < i; j++) {
double t = m->v[i*m->n+j];
m->v[i*m->n+j] = m->v[j*m->m+i];
m->v[j*m->m+i] = t;
}
}
}
/**
* Transpose the matrix on the device
*/
__global__
void cuda_matrix_transpose(mat* m){
//Calculate the row of current element
int row = blockIdx.y * blockDim.y + threadIdx.y;
//Calculate the column of current element
int col = blockIdx.x * blockDim.x + threadIdx.x;
//Just the threads in lower triangle should swap m elements
if(row<m->m && col<m->n && row<col){
double t = m->v[row*m->n+col];
m->v[row*m->n+col] = m->v[col*m->m+row];
// Finish swapping
m->v[col*m->m+row] = t;
}
}
//Create a new matrix and initialize its elements randomly
mat* matrix_create(int m, int n)
{
mat *x = matrix_new(m, n);
srand(time(NULL));
int i,j;
for (i = 0; i < m*n; i++){
j=rand() % MAX_INT;
x->v[i] = j;
}
return x;
}
//multiplication of two matrixes
mat* matrix_mul(mat *x, mat *y)
{
if (x->n != y->m) return NULL;
mat *r = matrix_new(x->m, y->n);
int i,j,k;
for (i = 0; i < x->m; i++)
for (j = 0; j < y->n; j++)
for (k = 0; k < x->n; k++)
r->v[i*r->n+j] += x->v[i*x->n+k] * y->v[k*y->n+j];
return r;
}
/**
* Multiply matrices x and y on the device and store
* the result in r on the device. r contains already
* enough memory for the result matrix.
*/
__global__
void cuda_matrix_mul(mat* x, mat* y, mat* r)
{
// Device kernel: r = x * y. Expects a 2D launch whose grid covers at
// least x->m rows and y->n columns; out-of-range threads do nothing.
// row/column of the output element this thread owns
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row < x->m && col < y->n){
double rValue=0;
//each thread computes one element of r: dot product of row of x
//with column of y, accumulated in a register
int k;
for(k=0; k < x->n; ++k)
rValue += x->v[row*x->n+k]*y->v[k*y->n+col];
r->v[row*r->n+col] = rValue;
}
}
//calculate minor of a matrix given int d. Set first d
//diagonal entries to 1 and and set the rest of elements of
//first d rows and columns to zero. Then copy rest of the
//elements from the given matrix and return the pointer to new
//object
mat* matrix_minor(mat *x, int d)
{
mat *m = matrix_new(x->m, x->n);
int i,j;
for (i = 0; i < d; i++)
m->v[i*m->n+i] = 1;
for (i = d; i < x->m; i++)
for (j = d; j < x->n; j++)
m->v[i*m->n+j] = x->v[i*x->n+j];
return m;
}
/**
* Calculate minor of a matrix given int d on device
*/
__global__
void cuda_matrix_minor(mat* x, int d, mat* m){
//calculate the row and column index of matrixes x and y
//respectively
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row < x->m && col < x->n){
if (row == col && row < d)
m->v[row*m->n+col]=1;
if(row >= d && row < x->m && col >= d && col < x->n)
m->v[row*m->n+col]=x->v[row*x->n+col];
}
}
// c = a + b * s
// Fused update: writes c[i] = a[i] + s * b[i] for i in [0, n) and
// returns the destination pointer so calls can be chained.
double *vmadd(double a[], double b[], double s, double c[], int n)
{
    int idx = 0;
    while (idx < n) {
        c[idx] = a[idx] + s * b[idx];
        ++idx;
    }
    return c;
}
/**
* c = a + b * s on device
*/
__global__
void cuda_vmadd(double a[], double b[], double *s, double c[], int n){
int row = blockIdx.x * blockDim.x + threadIdx.x;
if(row <n)
c[row] = a[row] + b[row]*(*s);
}
// m = I - 2vv^T
mat* vmul(double v[], int n)
{
mat *x = matrix_new(n, n);
int i,j;
for (i = 0; i < n; i++)
for (j = 0; j < n; j++)
x->v[i*x->n+j] = -2 * v[i] * v[j];
for (i = 0; i < n; i++)
x->v[i*x->n+i] += 1;
return x;
}
/**
* m = I - 2vv^T on device
*/
__global__
void cuda_vmul(double v[], int n, mat* m)
{
//calculate the row and column index of matrixes x and y respectively
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row < n && col < n){
m->v[row*m->n+col] = -2*v[row]*v[col];
if(row == col)
m->v[row*m->n+col] += 1;
}
}
// ||x||
// Euclidean (L2) norm of the first n entries of x.
double vnorm(double x[], int n)
{
    double squares = 0;
    for (int k = 0; k < n; ++k)
        squares += x[k] * x[k];
    return sqrt(squares);
}
/**
* Call with <<1,1>>
* ||x|| on device and result is given in *a.
* If flag is true (!= 0) a is multiplied with -1
*/
__global__
void cuda_vnorm(double x[], int n, double *a, int flag)
{
// Serial kernel, intended for a <<<1,1>>> launch: computes the L2
// norm of x[0..n) into *a; a nonzero flag negates the result.
// Guarded so only block 0 / thread 0 does the work even if launched
// with more threads.
if(blockIdx.x == 0 && threadIdx.x == 0 ) {
double sum = 0;
int i;
for (i = 0; i < n; i++)
sum += x[i]*x[i];
*a = sqrt(sum);
// negate when requested (sign choice for the Householder reflector)
if (flag) *a = *a*(-1);
}
}
// y = x / d
// Scale: y[i] = x[i] / d for i in [0, n); returns y.
// No guard against d == 0 (same contract as before).
double* vdiv(double x[], double d, double y[], int n)
{
    for (int k = 0; k < n; ++k)
        y[k] = x[k] / d;
    return y;
}
/**
* y = x / d on device
*/
__global__
void cuda_vdiv(double x[], double *d, double y[], int n)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
if(row <n)
y[row]=x[row]/(*d);
}
// take c-th column of m, put in v
double* mcol(mat *m, double *v, int c)
{
int i;
for (i = 0; i < m->m; i++)
v[i] = m->v[i*m->n+c];
return v;
}
/**
* Take c-th column of m, put in v on device
*/
__global__
void cuda_mcol(mat *m, double *v, int c)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
if(row < m->m)
v[row] = m->v[row*m->n+c];
}
/**
* Initialize vector e where k-th element is set to 1
* and all other are 0 on device
*/
__global__
void cuda_initialize_e(double* e, int n, int k){
int row = blockIdx.x * blockDim.x + threadIdx.x;
if(row < n){
if(row==k){
e[row] = 1;
}else{
e[row] = 0;
}
}
}
//visualize a matrix
void matrix_show(mat *m)
{
int i,j;
for(i = 0; i < m->m; i++) {
for (j = 0; j < m->n; j++) {
printf(" %8.3f", m->v[i*m->n+j]);
}
printf("\n");
}
printf("\n");
}
//householder calculations
// Host Householder QR factorization: m = Q * R.
// Outputs newly allocated *Q (orthogonal; transposed just before
// returning) and *R (upper triangular); caller must matrix_delete both.
// NOTE(review): q[] is a VLA of size m->m but only min(m->n, m->m-1)
// entries are filled; the accumulation loop assumes at least two
// reflection steps (see the i > 1 guard) — confirm for tiny inputs.
void householder(mat *m, mat **R, mat **Q)
{
mat *q[m->m];
mat *z = m, *z1;
int i,k;
// Build one Householder reflector per column k and apply it to z.
for (k = 0; k < m->n && k < m->m - 1; k++) {
double e[m->m], x[m->m], a;
z1 = matrix_minor(z, k);
if (z != m) matrix_delete(z);
z = z1;
mcol(z, x, k);
a = vnorm(x, m->m);
// sign chosen opposite the pivot to avoid cancellation
if (m->v[k*m->n+k] > 0) a = -a;
for (i = 0; i < m->m; i++)
e[i] = (i == k) ? 1 : 0;
vmadd(x, e, a, e, m->m);
vdiv(e, vnorm(e, m->m), e, m->m);
q[k] = vmul(e, m->m);
z1 = matrix_mul(q[k], z);
if (z != m) matrix_delete(z);
z = z1;
}
matrix_delete(z);
*Q = q[0];
*R = matrix_mul(q[0], m);
// Accumulate Q = q[last] * ... * q[1] * q[0] by left-multiplying.
for (i = 1; i < m->n && i < m->m - 1; i++) {
z1 = matrix_mul(q[i], *Q);
// at i == 1 *Q still aliases q[0], which is freed after the loop
if (i > 1) matrix_delete(*Q);
*Q = z1;
matrix_delete(q[i]);
}
matrix_delete(q[0]);
z = matrix_mul(*Q, m);
matrix_delete(*R);
*R = z;
matrix_transpose(*Q);
}
/**
* Householder calculations with calls to device kernels
*/
/**
 * Householder QR on the device, mirroring householder() step by step
 * with kernel launches.
 *   m        - device input matrix (deep copy of the host input)
 *   R, Q     - output device matrices, allocated here; caller frees
 *   original - HOST copy of the input, used only for dimensions and
 *              the pivot-sign test (device data is not read back)
 * Grid/block globals (dimGrid/dimBlock, numBlocksSingle/
 * numThreadsSingle) must be configured by main() before calling.
 */
void cuda_householder(mat *m, mat **R, mat **Q, mat *original)
{
mat *q;
mat *z = m, *z1;
int k;
double *e, *x, *a;
// Alloc vector e (device scratch)
cudaMalloc((void**)&e, sizeof(double) * original->m);
CudaCheckError();
// Alloc vector x (device scratch)
cudaMalloc((void**)&x, sizeof(double) * original->m);
CudaCheckError();
// Alloc scalar a (device scratch)
cudaMalloc((void**)&a, sizeof(double));
CudaCheckError();
// One iteration per column to eliminate, as in the host version.
for (k = 0; k < original->n && k < original->m - 1; k++) {
// Allocate and init matrix z1
cuda_matrix_new(original->m,original->n, &z1);
// One thread calculates one element of matrix z1
cuda_matrix_minor<<<dimGrid, dimBlock>>>(z, k, z1 ); // alternative idea: (original, k, z1) if original->v == m->v
CudaCheckError();
if (z != m) cuda_matrix_delete(z);
z = z1;
// One thread calculates one element of vector x
cuda_mcol<<<numBlocksSingle,numThreadsSingle>>>(z, x, k); // adapted from the sequential function
CudaCheckError();
// Pivot-sign test done on the HOST copy (original), so no D2H copy
// is needed here.
int f = (original->v[k*original->n+k] > 0) ? 1 : 0;
// Call cuda_vnorm with only one thread (serial reduction on device)
cuda_vnorm<<<1,1>>>(x, original->m, a, f);
CudaCheckError();
// One thread calculates one element of vector e (e = unit vector e_k)
cuda_initialize_e<<<numBlocksSingle,numThreadsSingle>>>(e, original->m, k);
CudaCheckError();
// One thread calculates one element of vector e (e = x + a*e_k)
cuda_vmadd<<<numBlocksSingle, numThreadsSingle>>>(x, e, a, e, original->m);
CudaCheckError();
// Call cuda_vnorm with only one thread
cuda_vnorm<<<1,1>>>(e, original->m, a, 0);
CudaCheckError();
// One thread calculates one element of vector e (normalize e)
cuda_vdiv<<<numBlocksSingle,numThreadsSingle>>>(e, a, e, original->m);
CudaCheckError();
// Allocate matrix q (the reflector, q = I - 2*e*e^T)
cuda_matrix_new(original->m, original->m, &q);
// One thread calculates one element of matrix q
CudaCheckError();
cuda_vmul<<<dimGrid, dimBlock>>>(e, original->m, q);
CudaCheckError();
// Allocate matrix z1
cuda_matrix_new(original->m,original->n,&z1);
// Calculate matrix product z1 = q*z with cuda_matrix_mul
cuda_matrix_mul<<<dimGrid,dimBlock>>>(q,z,z1);
CudaCheckError();
if (z != m) cuda_matrix_delete(z);
z = z1;
// Accumulate Q: first reflector becomes Q, later ones multiply in.
if(k==0){
*Q = q;
}
else if(k>0){
cuda_matrix_new(original->m, original->m, &z1);
cuda_matrix_mul<<<dimGrid, dimBlock>>>(q, *Q, z1);
CudaCheckError();
cuda_matrix_delete(*Q);
*Q = z1;
cuda_matrix_delete(q);
}
}
// Free temporary storage on device
cudaFree(e);
CudaCheckError();
cudaFree(x);
CudaCheckError();
cudaFree(a);
CudaCheckError();
cuda_matrix_delete(z);
cuda_matrix_new(original->m, original->n, R);
// Result matrix R = (accumulated Q) * m
cuda_matrix_mul<<<dimGrid, dimBlock>>>(*Q, m, *R);
CudaCheckError();
// Result matrix Q: transpose in place, as in the host version
cuda_matrix_transpose<<<dimGrid, dimBlock>>>(*Q);
CudaCheckError();
}
/** Task2
* Deep copy of matrix x to the device.
* Return pointer to new structure on device in *dX
*/
void copyToDevice(mat** dX, mat* x){
mat temp;
temp.m = x->m;
temp.n = x->n;
double* d_arr;
//allocate device matrix
cudaMalloc((void**)dX, sizeof(mat));
CudaCheckError();
//allocate device array
cudaMalloc((void**)&d_arr, x->m*x->n*sizeof(double));
CudaCheckError();
//copy contents of x array
cudaMemcpy(d_arr, x->v, x->m*x->n*sizeof(double), cudaMemcpyHostToDevice);
CudaCheckError();
//save d_arr in temp
temp.v = d_arr;
//copy the temp to device object
cudaMemcpy(*dX, &temp, sizeof(mat_t), cudaMemcpyHostToDevice);
CudaCheckError();
}
/**
* Deep copy of matrix dX to the host.
* Return pointer to new structure on host in *x
*/
/**
 * Deep copy of device matrix dX to the host.
 * A freshly malloc'ed mat is returned in *x; its element array is a
 * new host allocation as well. Caller owns both. The device objects
 * are left untouched (freeing them is cuda_matrix_delete's job).
 */
void copyToHost(mat** x, mat* dX){
*x = (mat*)malloc(sizeof(mat_t));
// struct copy first: brings over m, n and the DEVICE array pointer
cudaMemcpy(*x, dX, sizeof(mat_t), cudaMemcpyDeviceToHost);
CudaCheckError();
double* temp = (double*)malloc(sizeof(double) * (*x)->m * (*x)->n);
// (*x)->v still holds the device pointer here, so it is a valid source
cudaMemcpy(temp, (*x)->v, sizeof(double) * (*x)->m * (*x)->n, cudaMemcpyDeviceToHost);
CudaCheckError();
// now swap in the host buffer
(*x)->v = temp;
}
//check if two matrixes are equal with their corrsponding element's values being within an epsilon
// Compare two matrices: equal dimensions and element-wise equality
// within EPSILON. Returns 1 when equal, 0 otherwise.
// Fix: use fabs() on the double-valued difference. Depending on the
// headers, plain abs() can resolve to the integer overload, truncating
// every sub-1.0 difference to zero and making the check far too lax.
int is_equal(mat *m, mat *x){
    if(m->m != x->m || m->n != x->n) return 0;
    int i;
    for(i=0; i< (m->m * m->n); ++i)
        if(fabs(m->v[i] - x->v[i]) > EPSILON) return 0;
    return 1;
}
void showGPUMem(){
// show memory usage of GPU
size_t free_byte ;
size_t total_byte ;
cudaError_t cuda_status = cudaMemGetInfo( &free_byte, &total_byte ) ;
if ( cudaSuccess != cuda_status ){
printf("Error: cudaMemGetInfo fails, %s \n", cudaGetErrorString(cuda_status) );
exit(1);
}
double free_db = (double)free_byte ;
double total_db = (double)total_byte ;
double used_db = total_db - free_db ;
printf("GPU memory usage: used = %f MB, free = %f MB, total = %f MB\n",
used_db/1024.0/1024.0, free_db/1024.0/1024.0,
total_db/1024.0/1024.0);
}
int main(int argc, char *argv[])
{
if(argc != 3){
puts("Usage: qr #rows #cols\n //#rows > 2 and #cols > 1\n");
exit(0);
}
int row = atoi(argv[1]), col = atoi(argv[2]);
if(row < 3 || col < 2){
puts("Error: invalid number of rows or columns\n");
exit(0);
}
int maxDim = (row > col) ? row : col;
//use maxDim to calculate dimensions of grids and blocks for 2D cuda kernels
numBlocks = maxDim / BLOCK_SIZE;
if(maxDim % BLOCK_SIZE) numBlocks++;
dimGrid.x = numBlocks; dimGrid.y = numBlocks;
// Every CUDA block is of size (x,y,z) = (BLOCK_SIZE,BLOCK_SIZE,1) threads
dimBlock.x = BLOCK_SIZE; dimBlock.y = BLOCK_SIZE;
//dimensions of blocks and threads for 1D cuda kernels for vectors
// Every CUDA block is of size (x,y,z) = (BLOCK_SIZE*BLOCK_SIZE,1,1)
numThreadsSingle = BLOCK_SIZE * BLOCK_SIZE;
numBlocksSingle = maxDim/numThreadsSingle;
if(maxDim % numThreadsSingle) ++numBlocksSingle;
mat *R = NULL, *Q = NULL, *dX = NULL, *dQ = NULL, *dR = NULL;
//showGPUMem();
//create a random row*col matrix
mat *x = matrix_create(row, col);
//puts("x"); matrix_show(x);
fprintf(stderr,"matix x %d,%d,%p\n",x->m,x->n,x->v);
double time_start = get_wall_time();
//copy x to device
copyToDevice(&dX, x);
fprintf(stderr,"adress &X: %p\n",dX);
//showGPUMem();
//householder calculations on device
cuda_householder(dX, &dR, &dQ, x);
//copy the calculated dR and dQ to host
copyToHost(&R, dR);
copyToHost(&Q, dQ);
double time_end = get_wall_time();
//puts("Q"); matrix_show(Q);
//puts("R"); matrix_show(R);
// to show their product is the input matrix
mat* dM = NULL;
cuda_matrix_new(x->m, x->n, &dM);
cuda_matrix_mul<<<dimGrid, dimBlock>>>(dQ, dR, dM);
//copy resultant matrix to host
mat* m = NULL;
copyToHost(&m, dM);
//puts("Q * R"); matrix_show(m);
printf("Verification: ");
if(is_equal(m, x))
printf("Successful\n");
else
printf("Unsuccessful\n");
printf("Time taken: %8.3f seconds\n",time_end - time_start);
matrix_delete(x);
matrix_delete(R);
matrix_delete(Q);
matrix_delete(m);
cuda_matrix_delete(dX);
cuda_matrix_delete(dQ);
cuda_matrix_delete(dR);
cuda_matrix_delete(dM);
fflush(stdout);
return 0;
} | //
// main.c
// qr
//
// Created by Zia Ul-Huda on 21/11/2016.
// Copyright © 2016 TU Darmstadt. All rights reserved.
//
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include <sys/time.h>
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
void showGPUMem();
inline void __cudaSafeCall( hipError_t err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
if ( hipSuccess != err )
{
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
hipError_t err = hipGetLastError();
if ( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
showGPUMem();
exit( -1 );
}
// More careful checking. However, this will affect performance.
// Comment away if needed.
/* err = cudaDeviceSynchronize();
if( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}*/
#endif
return;
}
typedef struct {
int m, n;
double * v;
} mat_t, mat;
#define BLOCK_SIZE 16
#define MAX_INT 100
#define EPSILON 0.00000001
int numBlocks;
dim3 dimGrid, dimBlock;
int numBlocksSingle, numThreadsSingle;
//get current wall time
double get_wall_time(){
struct timeval time;
if (gettimeofday(&time,NULL)){
// Handle error
exit(-1);
}
return (double)time.tv_sec + (double)time.tv_usec * .000001;
}
//creates a new structure of mat type with m*n dimensions and
//returns its pointer
// Allocate an m x n matrix with all elements zero-initialized.
// Caller frees it with matrix_delete().
// Fix: calloc takes (count, element_size); the original call had the
// arguments swapped, which happened to allocate the right number of
// bytes but defeated calloc's count*size overflow check. The count is
// widened to size_t so m*n cannot overflow int arithmetic first.
mat* matrix_new(int m, int n)
{
    mat *x = (mat*)malloc(sizeof(mat_t));
    x->v = (double*)calloc((size_t)m * n, sizeof(double));
    x->m = m;
    x->n = n;
    return x;
}
/**
* Creates a new structure of type mat
* on the device and initializes it. It returns
* the pointer to the structure in *x
*/
void cuda_matrix_new(int m, int n, mat** x)
{
double* d_arr;
mat temp;// =(mat_t*)malloc(sizeof(mat_t)) ;
temp.m = m;//temp->m = m;
temp.n = n;//temp->n = n;
//allocate mat struct on device
hipMalloc((void**) x,sizeof(mat_t));
CudaCheckError();
//allocate array on device and set it to 0
hipMalloc((void**) &d_arr, m*n*sizeof(double));
CudaCheckError();
hipMemset(d_arr, 0, sizeof(double) * m * n);
CudaCheckError();
//store the device pointer in temp object
temp.v = d_arr; //temp->v = d_arr;
//copy the temp to device object
hipMemcpy(*x, &temp, sizeof(mat_t), hipMemcpyHostToDevice);
CudaCheckError();
// free(temp);
}
//delete a matrix
void matrix_delete(mat *m)
{
free(m->v);
free(m);
}
/**
* Free the memory of the structure pointed to by
* m on the device. Make sure to also free the memory
* of the elements of the matrix.
*/
void cuda_matrix_delete(mat *m)
{
mat temp;
// Copy m to host
hipMemcpy(&temp,m,sizeof(mat),hipMemcpyDeviceToHost);
CudaCheckError();
// Free array in m
hipFree(temp.v);
CudaCheckError();
// Free m
hipFree(m);
CudaCheckError();
}
//calculate transpose of a matrix
// In-place transpose by swapping elements across the main diagonal.
// NOTE(review): the indexing mixes m->n and m->m as row strides
// (i*m->n+j vs j*m->m+i), so this is only correct when the matrix is
// square (m->m == m->n) — which holds for the Q matrix it is applied
// to in householder(). Confirm before reusing on non-square matrices.
void matrix_transpose(mat *m)
{
int i,j;
// walk the strict lower triangle; swap each element with its mirror
for (i = 0; i < m->m; i++) {
for (j = 0; j < i; j++) {
double t = m->v[i*m->n+j];
m->v[i*m->n+j] = m->v[j*m->m+i];
m->v[j*m->m+i] = t;
}
}
}
/**
* Transpose the matrix on the device
*/
__global__
void cuda_matrix_transpose(mat* m){
//Calculate the row of current element
int row = blockIdx.y * blockDim.y + threadIdx.y;
//Calculate the column of current element
int col = blockIdx.x * blockDim.x + threadIdx.x;
//Just the threads in lower triangle should swap m elements
if(row<m->m && col<m->n && row<col){
double t = m->v[row*m->n+col];
m->v[row*m->n+col] = m->v[col*m->m+row];
// Finish swapping
m->v[col*m->m+row] = t;
}
}
//Create a new matrix and initialize its elements randomly
mat* matrix_create(int m, int n)
{
mat *x = matrix_new(m, n);
srand(time(NULL));
int i,j;
for (i = 0; i < m*n; i++){
j=rand() % MAX_INT;
x->v[i] = j;
}
return x;
}
//multiplication of two matrixes
mat* matrix_mul(mat *x, mat *y)
{
if (x->n != y->m) return NULL;
mat *r = matrix_new(x->m, y->n);
int i,j,k;
for (i = 0; i < x->m; i++)
for (j = 0; j < y->n; j++)
for (k = 0; k < x->n; k++)
r->v[i*r->n+j] += x->v[i*x->n+k] * y->v[k*y->n+j];
return r;
}
/**
* Multiply matrices x and y on the device and store
* the result in r on the device. r contains already
* enough memory for the result matrix.
*/
__global__
void cuda_matrix_mul(mat* x, mat* y, mat* r)
{
//calculate the row and column index of matrixes x and y respectively
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row < x->m && col < y->n){
double rValue=0;
//each thread computes one element of r
int k;
for(k=0; k < x->n; ++k)
rValue += x->v[row*x->n+k]*y->v[k*y->n+col];
r->v[row*r->n+col] = rValue;
}
}
//calculate minor of a matrix given int d. Set first d
//diagonal entries to 1 and and set the rest of elements of
//first d rows and columns to zero. Then copy rest of the
//elements from the given matrix and return the pointer to new
//object
mat* matrix_minor(mat *x, int d)
{
mat *m = matrix_new(x->m, x->n);
int i,j;
for (i = 0; i < d; i++)
m->v[i*m->n+i] = 1;
for (i = d; i < x->m; i++)
for (j = d; j < x->n; j++)
m->v[i*m->n+j] = x->v[i*x->n+j];
return m;
}
/**
* Calculate minor of a matrix given int d on device
*/
__global__
void cuda_matrix_minor(mat* x, int d, mat* m){
//calculate the row and column index of matrixes x and y
//respectively
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row < x->m && col < x->n){
if (row == col && row < d)
m->v[row*m->n+col]=1;
if(row >= d && row < x->m && col >= d && col < x->n)
m->v[row*m->n+col]=x->v[row*x->n+col];
}
}
// c = a + b * s
// c = a + s*b element-wise over the first n entries; the destination
// pointer c is returned so calls can be chained.
double *vmadd(double a[], double b[], double s, double c[], int n)
{
    for (int k = 0; k < n; ++k)
        c[k] = a[k] + s * b[k];
    return c;
}
/**
* c = a + b * s on device
*/
__global__
void cuda_vmadd(double a[], double b[], double *s, double c[], int n){
int row = blockIdx.x * blockDim.x + threadIdx.x;
if(row <n)
c[row] = a[row] + b[row]*(*s);
}
// m = I - 2vv^T
mat* vmul(double v[], int n)
{
mat *x = matrix_new(n, n);
int i,j;
for (i = 0; i < n; i++)
for (j = 0; j < n; j++)
x->v[i*x->n+j] = -2 * v[i] * v[j];
for (i = 0; i < n; i++)
x->v[i*x->n+i] += 1;
return x;
}
/**
* m = I - 2vv^T on device
*/
__global__
void cuda_vmul(double v[], int n, mat* m)
{
//calculate the row and column index of matrixes x and y respectively
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row < n && col < n){
m->v[row*m->n+col] = -2*v[row]*v[col];
if(row == col)
m->v[row*m->n+col] += 1;
}
}
// ||x||
// ||x||_2 over the first n entries: accumulate squares, take the root.
double vnorm(double x[], int n)
{
    double total = 0;
    int idx = 0;
    while (idx < n) {
        total += x[idx] * x[idx];
        ++idx;
    }
    return sqrt(total);
}
/**
* Call with <<1,1>>
* ||x|| on device and result is given in *a.
* If flag is true (!= 0) a is multiplied with -1
*/
__global__
void cuda_vnorm(double x[], int n, double *a, int flag)
{
if(blockIdx.x == 0 && threadIdx.x == 0 ) {
double sum = 0;
int i;
for (i = 0; i < n; i++)
sum += x[i]*x[i];
*a = sqrt(sum);
if (flag) *a = *a*(-1);
}
}
// y = x / d
// Element-wise division of x by scalar d into y; y is returned.
// No guard against d == 0 (same contract as before).
double* vdiv(double x[], double d, double y[], int n)
{
    int idx = 0;
    while (idx < n) {
        y[idx] = x[idx] / d;
        ++idx;
    }
    return y;
}
/**
* y = x / d on device
*/
__global__
void cuda_vdiv(double x[], double *d, double y[], int n)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
if(row <n)
y[row]=x[row]/(*d);
}
// take c-th column of m, put in v
double* mcol(mat *m, double *v, int c)
{
int i;
for (i = 0; i < m->m; i++)
v[i] = m->v[i*m->n+c];
return v;
}
/**
* Take c-th column of m, put in v on device
*/
__global__
void cuda_mcol(mat *m, double *v, int c)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
if(row < m->m)
v[row] = m->v[row*m->n+c];
}
/**
* Initialize vector e where k-th element is set to 1
* and all other are 0 on device
*/
__global__
void cuda_initialize_e(double* e, int n, int k){
int row = blockIdx.x * blockDim.x + threadIdx.x;
if(row < n){
if(row==k){
e[row] = 1;
}else{
e[row] = 0;
}
}
}
//visualize a matrix
void matrix_show(mat *m)
{
int i,j;
for(i = 0; i < m->m; i++) {
for (j = 0; j < m->n; j++) {
printf(" %8.3f", m->v[i*m->n+j]);
}
printf("\n");
}
printf("\n");
}
//householder calculations
void householder(mat *m, mat **R, mat **Q)
{
mat *q[m->m];
mat *z = m, *z1;
int i,k;
for (k = 0; k < m->n && k < m->m - 1; k++) {
double e[m->m], x[m->m], a;
z1 = matrix_minor(z, k);
if (z != m) matrix_delete(z);
z = z1;
mcol(z, x, k);
a = vnorm(x, m->m);
if (m->v[k*m->n+k] > 0) a = -a;
for (i = 0; i < m->m; i++)
e[i] = (i == k) ? 1 : 0;
vmadd(x, e, a, e, m->m);
vdiv(e, vnorm(e, m->m), e, m->m);
q[k] = vmul(e, m->m);
z1 = matrix_mul(q[k], z);
if (z != m) matrix_delete(z);
z = z1;
}
matrix_delete(z);
*Q = q[0];
*R = matrix_mul(q[0], m);
for (i = 1; i < m->n && i < m->m - 1; i++) {
z1 = matrix_mul(q[i], *Q);
if (i > 1) matrix_delete(*Q);
*Q = z1;
matrix_delete(q[i]);
}
matrix_delete(q[0]);
z = matrix_mul(*Q, m);
matrix_delete(*R);
*R = z;
matrix_transpose(*Q);
}
/**
* Householder calculations with calls to device kernels
*/
void cuda_householder(mat *m, mat **R, mat **Q, mat *original)
{
mat *q;
mat *z = m, *z1;
int k;
double *e, *x, *a;
// Alloc vector e
hipMalloc((void**)&e, sizeof(double) * original->m);
CudaCheckError();
// Alloc vector x
hipMalloc((void**)&x, sizeof(double) * original->m);
CudaCheckError();
// Alloc scalar a
hipMalloc((void**)&a, sizeof(double));
CudaCheckError();
//showGPUMem();
for (k = 0; k < original->n && k < original->m - 1; k++) {
// Allocate and init matrix z1
cuda_matrix_new(original->m,original->n, &z1);
// One thread calculates one element of matrix z1
cuda_matrix_minor<<<dimGrid, dimBlock>>>(z, k, z1 ); //Versuch, Idee alternativ (original, k, z1) wenn original->v == m->v
CudaCheckError();
if (z != m) cuda_matrix_delete(z);
z = z1;
// One thread calculates one element of vector x
cuda_mcol<<<numBlocksSingle,numThreadsSingle>>>(z, x, k); //Abgeschrieben von sequentieller Funktion
//z müsste ein Möglichkeit bieten auf z->v bzw m->v zuzugreifen
CudaCheckError();
int f = (original->v[k*original->n+k] > 0) ? 1 : 0;
// Call cuda_vnorm with only one thread
cuda_vnorm<<<1,1>>>(x, original->m, a, f); //Eingetragen: 1,1
CudaCheckError();
// One thread calculates one element of vector e
cuda_initialize_e<<<numBlocksSingle,numThreadsSingle>>>(e, original->m, k); //Eingetragen (e, original->m, k)
CudaCheckError();
// One thread calculates one element of vector e
cuda_vmadd<<<numBlocksSingle, numThreadsSingle>>>(x, e, a, e, original->m); //Eingetragen 1,1 und original->m
CudaCheckError();
// Call cuda_vnorm with only one thread
cuda_vnorm<<<1,1>>>(e, original->m, a, 0);
CudaCheckError();
// One thread calculates one element of vector e with cuda_vdiv
cuda_vdiv<<<numBlocksSingle,numThreadsSingle>>>(e, a, e, original->m); //EIngetragen: cuda_cdiv und 1,1
CudaCheckError();
// Allocate matrix q
cuda_matrix_new(original->m, original->m, &q);
// One thread calculates one element of matrix q
CudaCheckError();
cuda_vmul<<<dimGrid, dimBlock>>>(e, original->m, q);
CudaCheckError();
// Allocate matrix z1
cuda_matrix_new(original->m,original->n,&z1);
// One thread calculates one element of matrix z1
// Calculate matrix product z1 = q*z with cuda_matrix_mul
cuda_matrix_mul<<<dimGrid,dimBlock>>>(q,z,z1); //Komplett selbst geschrieben
CudaCheckError();
if (z != m) cuda_matrix_delete(z);
z = z1;
if(k==0){
*Q = q;
}
else if(k>0){
cuda_matrix_new(original->m, original->m, &z1);
cuda_matrix_mul<<<dimGrid, dimBlock>>>(q, *Q, z1);
CudaCheckError();
cuda_matrix_delete(*Q);
*Q = z1;
cuda_matrix_delete(q);
}
}
// Free temporary storage on device
hipFree(e);
CudaCheckError();
hipFree(x);
CudaCheckError();
hipFree(a);
CudaCheckError();
cuda_matrix_delete(z);
cuda_matrix_new(original->m, original->n, R);
// Result matrix R
cuda_matrix_mul<<<dimGrid, dimBlock>>>(*Q, m, *R);
CudaCheckError();
// Result matrix Q
cuda_matrix_transpose<<<dimGrid, dimBlock>>>(*Q);
CudaCheckError();
}
/** Task2
* Deep copy of matrix x to the device.
* Return pointer to new structure on device in *dX
*/
void copyToDevice(mat** dX, mat* x){
mat temp;
temp.m = x->m;
temp.n = x->n;
double* d_arr;
//allocate device matrix
hipMalloc((void**)dX, sizeof(mat));
CudaCheckError();
//allocate device array
hipMalloc((void**)&d_arr, x->m*x->n*sizeof(double));
CudaCheckError();
//copy contents of x array
hipMemcpy(d_arr, x->v, x->m*x->n*sizeof(double), hipMemcpyHostToDevice);
CudaCheckError();
//save d_arr in temp
temp.v = d_arr;
//copy the temp to device object
hipMemcpy(*dX, &temp, sizeof(mat_t), hipMemcpyHostToDevice);
CudaCheckError();
}
/**
* Deep copy of matrix dX to the host.
* Return pointer to new structure on host in *x
*/
void copyToHost(mat** x, mat* dX){
*x = (mat*)malloc(sizeof(mat_t));
hipMemcpy(*x, dX, sizeof(mat_t), hipMemcpyDeviceToHost);
CudaCheckError();
double* temp = (double*)malloc(sizeof(double) * (*x)->m * (*x)->n);
// Copy array of dX to temp
hipMemcpy(temp, (*x)->v, sizeof(double) * (*x)->m * (*x)->n, hipMemcpyDeviceToHost);
CudaCheckError();
(*x)->v = temp;
}
//check if two matrixes are equal with their corrsponding element's values being within an epsilon
// Compare two matrices: equal dimensions and element-wise equality
// within EPSILON. Returns 1 when equal, 0 otherwise.
// Fix: use fabs() on the double-valued difference. Depending on the
// headers, plain abs() can resolve to the integer overload, truncating
// every sub-1.0 difference to zero and making the check far too lax.
int is_equal(mat *m, mat *x){
    if(m->m != x->m || m->n != x->n) return 0;
    int i;
    for(i=0; i< (m->m * m->n); ++i)
        if(fabs(m->v[i] - x->v[i]) > EPSILON) return 0;
    return 1;
}
void showGPUMem(){
// show memory usage of GPU
size_t free_byte ;
size_t total_byte ;
hipError_t cuda_status = hipMemGetInfo( &free_byte, &total_byte ) ;
if ( hipSuccess != cuda_status ){
printf("Error: hipMemGetInfo fails, %s \n", hipGetErrorString(cuda_status) );
exit(1);
}
double free_db = (double)free_byte ;
double total_db = (double)total_byte ;
double used_db = total_db - free_db ;
printf("GPU memory usage: used = %f MB, free = %f MB, total = %f MB\n",
used_db/1024.0/1024.0, free_db/1024.0/1024.0,
total_db/1024.0/1024.0);
}
int main(int argc, char *argv[])
{
if(argc != 3){
puts("Usage: qr #rows #cols\n //#rows > 2 and #cols > 1\n");
exit(0);
}
int row = atoi(argv[1]), col = atoi(argv[2]);
if(row < 3 || col < 2){
puts("Error: invalid number of rows or columns\n");
exit(0);
}
int maxDim = (row > col) ? row : col;
//use maxDim to calculate dimensions of grids and blocks for 2D cuda kernels
numBlocks = maxDim / BLOCK_SIZE;
if(maxDim % BLOCK_SIZE) numBlocks++;
dimGrid.x = numBlocks; dimGrid.y = numBlocks;
// Every CUDA block is of size (x,y,z) = (BLOCK_SIZE,BLOCK_SIZE,1) threads
dimBlock.x = BLOCK_SIZE; dimBlock.y = BLOCK_SIZE;
//dimensions of blocks and threads for 1D cuda kernels for vectors
// Every CUDA block is of size (x,y,z) = (BLOCK_SIZE*BLOCK_SIZE,1,1)
numThreadsSingle = BLOCK_SIZE * BLOCK_SIZE;
numBlocksSingle = maxDim/numThreadsSingle;
if(maxDim % numThreadsSingle) ++numBlocksSingle;
mat *R = NULL, *Q = NULL, *dX = NULL, *dQ = NULL, *dR = NULL;
//showGPUMem();
//create a random row*col matrix
mat *x = matrix_create(row, col);
//puts("x"); matrix_show(x);
fprintf(stderr,"matix x %d,%d,%p\n",x->m,x->n,x->v);
double time_start = get_wall_time();
//copy x to device
copyToDevice(&dX, x);
fprintf(stderr,"adress &X: %p\n",dX);
//showGPUMem();
//householder calculations on device
cuda_householder(dX, &dR, &dQ, x);
//copy the calculated dR and dQ to host
copyToHost(&R, dR);
copyToHost(&Q, dQ);
double time_end = get_wall_time();
//puts("Q"); matrix_show(Q);
//puts("R"); matrix_show(R);
// to show their product is the input matrix
mat* dM = NULL;
cuda_matrix_new(x->m, x->n, &dM);
cuda_matrix_mul<<<dimGrid, dimBlock>>>(dQ, dR, dM);
//copy resultant matrix to host
mat* m = NULL;
copyToHost(&m, dM);
//puts("Q * R"); matrix_show(m);
printf("Verification: ");
if(is_equal(m, x))
printf("Successful\n");
else
printf("Unsuccessful\n");
printf("Time taken: %8.3f seconds\n",time_end - time_start);
matrix_delete(x);
matrix_delete(R);
matrix_delete(Q);
matrix_delete(m);
cuda_matrix_delete(dX);
cuda_matrix_delete(dQ);
cuda_matrix_delete(dR);
cuda_matrix_delete(dM);
fflush(stdout);
return 0;
} |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/reduce.h>
using namespace std;
int main(int argc, const char *argv[]) {
string N;
if (argc > 1) {
N = string(argv[1]);
}
unsigned int n = atoi(N.c_str());
thrust::host_vector<int> H(n);
for (unsigned int i = 0; i < n; i++) {
H[i] = 1;
}
thrust::device_vector<int> D = H;
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
int sum = thrust::reduce(D.begin(), D.end());
cudaEventRecord(stop);
cudaEventSynchronize(stop);
// Get the elapsed time in milliseconds
float ms;
cudaEventElapsedTime(&ms, start, stop);
cout << sum << endl;
cout << ms << endl;
return 0;
} | #include <hip/hip_runtime.h>
#include <iostream>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/reduce.h>
using namespace std;
int main(int argc, const char *argv[]) {
string N;
if (argc > 1) {
N = string(argv[1]);
}
unsigned int n = atoi(N.c_str());
thrust::host_vector<int> H(n);
for (unsigned int i = 0; i < n; i++) {
H[i] = 1;
}
thrust::device_vector<int> D = H;
hipEvent_t start;
hipEvent_t stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
int sum = thrust::reduce(D.begin(), D.end());
hipEventRecord(stop);
hipEventSynchronize(stop);
// Get the elapsed time in milliseconds
float ms;
hipEventElapsedTime(&ms, start, stop);
cout << sum << endl;
cout << ms << endl;
return 0;
} |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
//#include <cutil.h>
// Includes
#include <stdio.h>
#include<cuda.h>
// includes, project
//#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 1024
// Variables
uint32_t* h_A;
uint32_t* h_B;
uint32_t* h_C;
uint32_t* d_A;
uint32_t* d_B;
uint32_t* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(uint32_t*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const uint32_t* A, const uint32_t* B, uint32_t* C, int N, uint64_t iterations, int div)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
uint32_t Value1=1;
uint32_t Value2=A[i];
uint32_t Value3=B[i];
uint32_t Value;
uint32_t I1=A[i];
uint32_t I2=B[i];
// Excessive Addition access
// if(((i%32)<=31))
if((i%32)<div){
#pragma unroll 100
for(uint64_t k=0; k<iterations;k++) {
Value1=I1*A[i];
Value3=I2*B[i];
Value1*=Value2;
Value1*=Value2;
Value2=Value3*Value1;
Value1=Value2*Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value*Value2;
}
int main(int argc, char** argv)
{
uint64_t iterations;
unsigned blocks;
int div;
if (argc != 4){
fprintf(stderr,"usage: %s #iterations #cores\n",argv[0]);
exit(1);
}
else {
iterations = atoll(argv[1]);
blocks = atoi(argv[2]);
div = atoi(argv[3]);
}
printf("Power Microbenchmarks with %llu iterations \n", iterations);
int N = THREADS_PER_BLOCK*blocks;
size_t size = N * sizeof(uint32_t);
// Allocate input vectors h_A and h_B in host memory
h_A = (uint32_t*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (uint32_t*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (uint32_t*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
printf("after\n");
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(blocks,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N, iterations,div);
cudaThreadSynchronize();
getLastCudaError("kernel launch failure");
cudaThreadSynchronize();
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random uint32_t entries.
void RandomInit(uint32_t* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / RAND_MAX;
}
} | .file "tmpxft_00104a07_00000000-6_INT_MUL_l1.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2064:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2064:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata._Z17__checkCudaErrors9cudaErrorPKci.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "%s(%i) : CUDA Runtime API error %d: %s.\n"
.section .text._Z17__checkCudaErrors9cudaErrorPKci,"axG",@progbits,_Z17__checkCudaErrors9cudaErrorPKci,comdat
.weak _Z17__checkCudaErrors9cudaErrorPKci
.type _Z17__checkCudaErrors9cudaErrorPKci, @function
_Z17__checkCudaErrors9cudaErrorPKci:
.LFB2057:
.cfi_startproc
endbr64
testl %edi, %edi
jne .L8
ret
.L8:
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movl %edi, %ebx
movq %rsi, %rbp
movl %edx, %r12d
call cudaGetErrorString@PLT
subq $8, %rsp
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
movl %ebx, %r9d
movl %r12d, %r8d
movq %rbp, %rcx
leaq .LC0(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $-1, %edi
call exit@PLT
.cfi_endproc
.LFE2057:
.size _Z17__checkCudaErrors9cudaErrorPKci, .-_Z17__checkCudaErrors9cudaErrorPKci
.text
.globl _Z16CleanupResourcesv
.type _Z16CleanupResourcesv, @function
_Z16CleanupResourcesv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq d_A(%rip), %rdi
testq %rdi, %rdi
je .L10
call cudaFree@PLT
.L10:
movq d_B(%rip), %rdi
testq %rdi, %rdi
je .L11
call cudaFree@PLT
.L11:
movq d_C(%rip), %rdi
testq %rdi, %rdi
je .L12
call cudaFree@PLT
.L12:
movq h_A(%rip), %rdi
testq %rdi, %rdi
je .L13
call free@PLT
.L13:
movq h_B(%rip), %rdi
testq %rdi, %rdi
je .L14
call free@PLT
.L14:
movq h_C(%rip), %rdi
testq %rdi, %rdi
je .L9
call free@PLT
.L9:
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _Z16CleanupResourcesv, .-_Z16CleanupResourcesv
.globl _Z10RandomInitPji
.type _Z10RandomInitPji, @function
_Z10RandomInitPji:
.LFB2061:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L22
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbx
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %rbp
.L19:
call rand@PLT
movslq %eax, %rcx
movq %rcx, %rdx
salq $30, %rdx
addq %rcx, %rdx
sarq $61, %rdx
sarl $31, %eax
subl %eax, %edx
movl %edx, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L19
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L22:
.cfi_restore 3
.cfi_restore 6
ret
.cfi_endproc
.LFE2061:
.size _Z10RandomInitPji, .-_Z10RandomInitPji
.globl _Z41__device_stub__Z12PowerKernal2PKjS0_PjimiPKjS0_Pjimi
.type _Z41__device_stub__Z12PowerKernal2PKjS0_PjimiPKjS0_Pjimi, @function
_Z41__device_stub__Z12PowerKernal2PKjS0_PjimiPKjS0_Pjimi:
.LFB2086:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movq %r8, 8(%rsp)
movl %r9d, 16(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
leaq 16(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L29
.L25:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L30
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L29:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z12PowerKernal2PKjS0_Pjimi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L25
.L30:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2086:
.size _Z41__device_stub__Z12PowerKernal2PKjS0_PjimiPKjS0_Pjimi, .-_Z41__device_stub__Z12PowerKernal2PKjS0_PjimiPKjS0_Pjimi
.globl _Z12PowerKernal2PKjS0_Pjimi
.type _Z12PowerKernal2PKjS0_Pjimi, @function
_Z12PowerKernal2PKjS0_Pjimi:
.LFB2087:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z41__device_stub__Z12PowerKernal2PKjS0_PjimiPKjS0_Pjimi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _Z12PowerKernal2PKjS0_Pjimi, .-_Z12PowerKernal2PKjS0_Pjimi
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "usage: %s #iterations #cores\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "Power Microbenchmarks with %llu iterations \n"
.section .rodata.str1.1
.LC3:
.string "before\n"
.section .rodata.str1.8
.align 8
.LC4:
.string "/home/ubuntu/Datasets/stackv2/train-structured/speverel/gpgpu-sim_simulations/master/benchmarks/src/cuda/gpuwattch-ubench/static_power_modeling/ACT_CORE2/INT_MUL_l1.cu"
.section .rodata.str1.1
.LC5:
.string "after\n"
.LC6:
.string "kernel launch failure"
.section .rodata.str1.8
.align 8
.LC7:
.string "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n"
.text
.globl main
.type main, @function
main:
.LFB2059:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rsi, %rbx
cmpl $4, %edi
jne .L41
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtoll@PLT
movq %rax, %r14
movq 16(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r12
movq 24(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r15
movq %r14, %rdx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %r12d, %r13d
sall $10, %r13d
movslq %r13d, %rbx
salq $2, %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, h_A(%rip)
testq %rax, %rax
je .L42
.L35:
movq %rbx, %rdi
call malloc@PLT
movq %rax, h_B(%rip)
testq %rax, %rax
je .L43
.L36:
movq %rbx, %rdi
call malloc@PLT
movq %rax, h_C(%rip)
testq %rax, %rax
je .L44
.L37:
movl %r13d, %esi
movq h_A(%rip), %rdi
call _Z10RandomInitPji
movl %r13d, %esi
movq h_B(%rip), %rdi
call _Z10RandomInitPji
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbx, %rsi
leaq d_A(%rip), %rdi
call cudaMalloc@PLT
movl %eax, %edi
movl $122, %edx
leaq .LC4(%rip), %rbp
movq %rbp, %rsi
call _Z17__checkCudaErrors9cudaErrorPKci
movq %rbx, %rsi
leaq d_B(%rip), %rdi
call cudaMalloc@PLT
movl %eax, %edi
movl $123, %edx
movq %rbp, %rsi
call _Z17__checkCudaErrors9cudaErrorPKci
movq %rbx, %rsi
leaq d_C(%rip), %rdi
call cudaMalloc@PLT
movl %eax, %edi
movl $124, %edx
movq %rbp, %rsi
call _Z17__checkCudaErrors9cudaErrorPKci
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %ecx
movq %rbx, %rdx
movq h_A(%rip), %rsi
movq d_A(%rip), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $128, %edx
movq %rbp, %rsi
call _Z17__checkCudaErrors9cudaErrorPKci
movl $1, %ecx
movq %rbx, %rdx
movq h_B(%rip), %rsi
movq d_B(%rip), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $129, %edx
movq %rbp, %rsi
call _Z17__checkCudaErrors9cudaErrorPKci
movl %r12d, 8(%rsp)
movl $1, 12(%rsp)
movl $1024, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L45
.L38:
call cudaThreadSynchronize@PLT
call cudaGetLastError@PLT
movl %eax, %ebp
testl %eax, %eax
jne .L46
call cudaThreadSynchronize@PLT
movl $2, %ecx
movq %rbx, %rdx
movq d_C(%rip), %rsi
movq h_C(%rip), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $152, %edx
leaq .LC4(%rip), %rsi
call _Z17__checkCudaErrors9cudaErrorPKci
call _Z16CleanupResourcesv
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L41:
.cfi_restore_state
movq (%rsi), %rcx
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L42:
call _Z16CleanupResourcesv
jmp .L35
.L43:
call _Z16CleanupResourcesv
jmp .L36
.L44:
call _Z16CleanupResourcesv
jmp .L37
.L45:
movl %r15d, %r9d
movq %r14, %r8
movl %r13d, %ecx
movq d_C(%rip), %rdx
movq d_B(%rip), %rsi
movq d_A(%rip), %rdi
call _Z41__device_stub__Z12PowerKernal2PKjS0_PjimiPKjS0_Pjimi
jmp .L38
.L46:
movl %eax, %edi
call cudaGetErrorString@PLT
pushq %rax
.cfi_def_cfa_offset 104
pushq %rbp
.cfi_def_cfa_offset 112
leaq .LC6(%rip), %r9
movl $143, %r8d
leaq .LC4(%rip), %rcx
leaq .LC7(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $-1, %edi
call exit@PLT
.cfi_endproc
.LFE2059:
.size main, .-main
.section .rodata.str1.1
.LC8:
.string "_Z12PowerKernal2PKjS0_Pjimi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2089:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z12PowerKernal2PKjS0_Pjimi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2089:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl my_timer
.bss
.align 4
.type my_timer, @object
.size my_timer, 4
my_timer:
.zero 4
.globl noprompt
.type noprompt, @object
.size noprompt, 1
noprompt:
.zero 1
.globl d_C
.align 8
.type d_C, @object
.size d_C, 8
d_C:
.zero 8
.globl d_B
.align 8
.type d_B, @object
.size d_B, 8
d_B:
.zero 8
.globl d_A
.align 8
.type d_A, @object
.size d_A, 8
d_A:
.zero 8
.globl h_C
.align 8
.type h_C, @object
.size h_C, 8
h_C:
.zero 8
.globl h_B
.align 8
.type h_B, @object
.size h_B, 8
h_B:
.zero 8
.globl h_A
.align 8
.type h_A, @object
.size h_A, 8
h_A:
.zero 8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
//#include <cutil.h>
// Includes
#include <stdio.h>
#include<cuda.h>
// includes, project
//#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 1024
// Variables
uint32_t* h_A;
uint32_t* h_B;
uint32_t* h_C;
uint32_t* d_A;
uint32_t* d_B;
uint32_t* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(uint32_t*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const uint32_t* A, const uint32_t* B, uint32_t* C, int N, uint64_t iterations, int div)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
uint32_t Value1=1;
uint32_t Value2=A[i];
uint32_t Value3=B[i];
uint32_t Value;
uint32_t I1=A[i];
uint32_t I2=B[i];
// Excessive Addition access
// if(((i%32)<=31))
if((i%32)<div){
#pragma unroll 100
for(uint64_t k=0; k<iterations;k++) {
Value1=I1*A[i];
Value3=I2*B[i];
Value1*=Value2;
Value1*=Value2;
Value2=Value3*Value1;
Value1=Value2*Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value*Value2;
}
int main(int argc, char** argv)
{
uint64_t iterations;
unsigned blocks;
int div;
if (argc != 4){
fprintf(stderr,"usage: %s #iterations #cores\n",argv[0]);
exit(1);
}
else {
iterations = atoll(argv[1]);
blocks = atoi(argv[2]);
div = atoi(argv[3]);
}
printf("Power Microbenchmarks with %llu iterations \n", iterations);
int N = THREADS_PER_BLOCK*blocks;
size_t size = N * sizeof(uint32_t);
// Allocate input vectors h_A and h_B in host memory
h_A = (uint32_t*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (uint32_t*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (uint32_t*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
printf("after\n");
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(blocks,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N, iterations,div);
cudaThreadSynchronize();
getLastCudaError("kernel launch failure");
cudaThreadSynchronize();
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random uint32_t entries.
void RandomInit(uint32_t* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / RAND_MAX;
}
} | #include <stdio.h>
#include <stdlib.h>
//#include <cutil.h>
// Includes
#include <stdio.h>
#include<hip/hip_runtime.h>
// includes, project
//#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#define THREADS_PER_BLOCK 1024
// Variables
uint32_t* h_A;
uint32_t* h_B;
uint32_t* h_C;
uint32_t* d_A;
uint32_t* d_B;
uint32_t* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(uint32_t*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const uint32_t* A, const uint32_t* B, uint32_t* C, int N, uint64_t iterations, int div)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
uint32_t Value1=1;
uint32_t Value2=A[i];
uint32_t Value3=B[i];
uint32_t Value;
uint32_t I1=A[i];
uint32_t I2=B[i];
// Excessive Addition access
// if(((i%32)<=31))
if((i%32)<div){
#pragma unroll 100
for(uint64_t k=0; k<iterations;k++) {
Value1=I1*A[i];
Value3=I2*B[i];
Value1*=Value2;
Value1*=Value2;
Value2=Value3*Value1;
Value1=Value2*Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value*Value2;
}
int main(int argc, char** argv)
{
uint64_t iterations;
unsigned blocks;
int div;
if (argc != 4){
fprintf(stderr,"usage: %s #iterations #cores\n",argv[0]);
exit(1);
}
else {
iterations = atoll(argv[1]);
blocks = atoi(argv[2]);
div = atoi(argv[3]);
}
printf("Power Microbenchmarks with %llu iterations \n", iterations);
int N = THREADS_PER_BLOCK*blocks;
size_t size = N * sizeof(uint32_t);
// Allocate input vectors h_A and h_B in host memory
h_A = (uint32_t*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (uint32_t*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (uint32_t*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
printf("after\n");
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(blocks,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N, iterations,div);
hipDeviceSynchronize();
getLastCudaError("kernel launch failure");
hipDeviceSynchronize();
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random uint32_t entries.
void RandomInit(uint32_t* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / RAND_MAX;
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <stdio.h>
#include <stdlib.h>
//#include <cutil.h>
// Includes
#include <stdio.h>
#include<hip/hip_runtime.h>
// includes, project
//#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#define THREADS_PER_BLOCK 1024
// Variables
uint32_t* h_A;
uint32_t* h_B;
uint32_t* h_C;
uint32_t* d_A;
uint32_t* d_B;
uint32_t* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(uint32_t*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const uint32_t* A, const uint32_t* B, uint32_t* C, int N, uint64_t iterations, int div)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
uint32_t Value1=1;
uint32_t Value2=A[i];
uint32_t Value3=B[i];
uint32_t Value;
uint32_t I1=A[i];
uint32_t I2=B[i];
// Excessive Addition access
// if(((i%32)<=31))
if((i%32)<div){
#pragma unroll 100
for(uint64_t k=0; k<iterations;k++) {
Value1=I1*A[i];
Value3=I2*B[i];
Value1*=Value2;
Value1*=Value2;
Value2=Value3*Value1;
Value1=Value2*Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value*Value2;
}
int main(int argc, char** argv)
{
uint64_t iterations;
unsigned blocks;
int div;
if (argc != 4){
fprintf(stderr,"usage: %s #iterations #cores\n",argv[0]);
exit(1);
}
else {
iterations = atoll(argv[1]);
blocks = atoi(argv[2]);
div = atoi(argv[3]);
}
printf("Power Microbenchmarks with %llu iterations \n", iterations);
int N = THREADS_PER_BLOCK*blocks;
size_t size = N * sizeof(uint32_t);
// Allocate input vectors h_A and h_B in host memory
h_A = (uint32_t*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (uint32_t*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (uint32_t*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
printf("after\n");
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(blocks,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N, iterations,div);
hipDeviceSynchronize();
getLastCudaError("kernel launch failure");
hipDeviceSynchronize();
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random uint32_t entries.
// Fills `data` with `n` pseudo-random uint32_t entries.
// NOTE(review): rand() / RAND_MAX is *integer* division, so every entry is 0
// except in the astronomically unlikely case rand() == RAND_MAX. This rewrite
// preserves that behavior exactly; if non-zero inputs were intended, a float
// division or a modulo was probably meant — confirm with the benchmark author.
void RandomInit(uint32_t* data, int n)
{
    int idx = 0;
    while (idx < n) {
        data[idx] = rand() / RAND_MAX;
        ++idx;
    }
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z12PowerKernal2PKjS0_Pjimi
.globl _Z12PowerKernal2PKjS0_Pjimi
.p2align 8
.type _Z12PowerKernal2PKjS0_Pjimi,@function
_Z12PowerKernal2PKjS0_Pjimi:
s_load_b32 s2, s[0:1], 0x3c
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s2, 0xffff
s_load_b64 s[2:3], s[0:1], 0x0
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
s_clause 0x1
s_load_b64 s[4:5], s[0:1], 0x20
s_load_b32 s6, s[0:1], 0x28
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v2, 31, v1
v_lshlrev_b64 v[3:4], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v5, vcc_lo, s2, v3
v_add_co_ci_u32_e32 v6, vcc_lo, s3, v4, vcc_lo
s_cmp_lg_u64 s[4:5], 0
s_cselect_b32 s2, -1, 0
global_load_b32 v0, v[5:6], off
v_ashrrev_i32_e32 v5, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshrrev_b32_e32 v5, 27, v5
v_add_nc_u32_e32 v5, v1, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_and_b32_e32 v5, 0xffffffe0, v5
v_sub_nc_u32_e32 v5, v1, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_cmp_gt_i32_e32 vcc_lo, s6, v5
v_mov_b32_e32 v5, 1
s_and_b32 s2, vcc_lo, s2
s_and_saveexec_b32 s6, s2
s_cbranch_execz .LBB0_7
s_load_b64 s[2:3], s[0:1], 0x8
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, s2, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v4, vcc_lo
s_add_u32 s2, s4, -1
s_addc_u32 s3, s5, -1
s_add_u32 s7, 0, 0xf5c24dc0
global_load_b32 v3, v[3:4], off
s_addc_u32 s8, 0, 12
s_mul_hi_u32 s10, s7, 0xffffff9c
s_add_i32 s8, s8, 0x28f5c1c
s_sub_i32 s10, s10, s7
s_mul_i32 s11, s8, 0xffffff9c
s_mul_i32 s9, s7, 0xffffff9c
s_add_i32 s10, s10, s11
s_mul_hi_u32 s12, s8, s9
s_mul_i32 s11, s8, s9
s_mul_i32 s13, s7, s10
s_mul_hi_u32 s9, s7, s9
s_mul_hi_u32 s14, s7, s10
s_add_u32 s9, s9, s13
s_addc_u32 s13, 0, s14
s_mul_hi_u32 s14, s8, s10
s_add_u32 s9, s9, s11
s_addc_u32 s9, s13, s12
s_mul_i32 s10, s8, s10
s_addc_u32 s11, s14, 0
s_add_u32 s9, s9, s10
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_co_u32 v4, s7, s7, s9
s_addc_u32 s9, 0, s11
s_cmp_lg_u32 s7, 0
v_readfirstlane_b32 s7, v4
s_addc_u32 s8, s8, s9
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
s_mul_i32 s9, s2, s8
s_mul_hi_u32 s11, s2, s8
s_mul_hi_u32 s10, s2, s7
s_mul_i32 s12, s3, s7
s_add_u32 s9, s10, s9
s_addc_u32 s10, 0, s11
s_mul_hi_u32 s7, s3, s7
s_mul_hi_u32 s11, s3, s8
s_add_u32 s9, s9, s12
s_addc_u32 s7, s10, s7
s_mul_i32 s8, s3, s8
s_addc_u32 s9, s11, 0
s_add_u32 s7, s7, s8
s_addc_u32 s8, 0, s9
s_mul_hi_u32 s9, s7, 0x64
s_mulk_i32 s7, 0x64
s_mulk_i32 s8, 0x64
v_sub_co_u32 v4, s2, s2, s7
s_add_i32 s9, s9, s8
s_cmp_lg_u32 s2, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2)
v_sub_co_u32 v5, s2, v4, 0x64
s_subb_u32 s3, s3, s9
s_cmp_lg_u32 s2, 0
v_readfirstlane_b32 s11, v4
v_sub_co_u32 v6, s2, v5, 0x64
v_readfirstlane_b32 s7, v5
s_subb_u32 s8, s3, 0
s_cmp_lg_u32 s2, 0
s_delay_alu instid0(VALU_DEP_2)
v_readfirstlane_b32 s10, v6
s_subb_u32 s2, s8, 0
s_cmpk_gt_u32 s7, 0x63
s_waitcnt vmcnt(1)
v_mov_b32_e32 v4, v0
s_cselect_b32 s9, -1, 0
s_cmp_eq_u32 s8, 0
s_cselect_b32 s9, s9, -1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_lg_u32 s9, 0
s_cselect_b32 s2, s2, s8
s_cselect_b32 s7, s10, s7
s_cmpk_gt_u32 s11, 0x63
s_cselect_b32 s8, -1, 0
s_cmp_eq_u32 s3, 0
s_cselect_b32 s8, s8, -1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_lg_u32 s8, 0
v_cmp_gt_u64_e64 s8, 0x64, s[4:5]
s_cselect_b32 s7, s7, s11
s_cselect_b32 s3, s2, s3
s_add_u32 s2, s7, 1
s_addc_u32 s3, s3, 0
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_lg_u64 s[2:3], 0x64
s_cselect_b32 s7, -1, 0
s_cmp_eq_u64 s[2:3], 0x64
s_cselect_b32 s3, 0, s3
s_cselect_b32 s2, 0, s2
s_and_b32 vcc_lo, exec_lo, s8
s_waitcnt vmcnt(0)
v_mul_lo_u32 v3, v3, v3
s_cbranch_vccnz .LBB0_4
s_sub_u32 s4, s4, s2
s_subb_u32 s5, s5, s3
.LBB0_3:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_mul_lo_u32 v4, v4, v0
s_add_u32 s4, s4, 0xffffff9c
s_addc_u32 s5, s5, -1
s_cmp_lg_u64 s[4:5], 0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
v_mul_lo_u32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v4, v0
v_mul_lo_u32 v5, v4, v3
s_delay_alu instid0(VALU_DEP_1)
v_mul_lo_u32 v4, v5, v4
s_cbranch_scc1 .LBB0_3
.LBB0_4:
s_and_not1_b32 vcc_lo, exec_lo, s7
s_cbranch_vccnz .LBB0_6
.LBB0_5:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_mul_lo_u32 v4, v4, v0
s_add_u32 s2, s2, -1
s_addc_u32 s3, s3, -1
s_cmp_lg_u64 s[2:3], 0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mul_lo_u32 v4, v5, v4
s_cbranch_scc1 .LBB0_5
.LBB0_6:
s_delay_alu instid0(VALU_DEP_1)
v_mul_lo_u32 v5, v4, v3
v_mov_b32_e32 v0, v4
.LBB0_7:
s_or_b32 exec_lo, exec_lo, s6
s_load_b64 s[0:1], s[0:1], 0x10
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_2)
v_mul_lo_u32 v3, v5, v0
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_add_co_u32 v0, vcc_lo, s0, v1
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v2, vcc_lo
global_store_b32 v[0:1], v3, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12PowerKernal2PKjS0_Pjimi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 304
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z12PowerKernal2PKjS0_Pjimi, .Lfunc_end0-_Z12PowerKernal2PKjS0_Pjimi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 8
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: by_value
- .offset: 48
.size: 4
.value_kind: hidden_block_count_x
- .offset: 52
.size: 4
.value_kind: hidden_block_count_y
- .offset: 56
.size: 4
.value_kind: hidden_block_count_z
- .offset: 60
.size: 2
.value_kind: hidden_group_size_x
- .offset: 62
.size: 2
.value_kind: hidden_group_size_y
- .offset: 64
.size: 2
.value_kind: hidden_group_size_z
- .offset: 66
.size: 2
.value_kind: hidden_remainder_x
- .offset: 68
.size: 2
.value_kind: hidden_remainder_y
- .offset: 70
.size: 2
.value_kind: hidden_remainder_z
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 112
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 304
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12PowerKernal2PKjS0_Pjimi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z12PowerKernal2PKjS0_Pjimi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <stdio.h>
#include <stdlib.h>
//#include <cutil.h>
// Includes
#include <stdio.h>
#include<hip/hip_runtime.h>
// includes, project
//#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#define THREADS_PER_BLOCK 1024
// Variables
uint32_t* h_A;
uint32_t* h_B;
uint32_t* h_C;
uint32_t* d_A;
uint32_t* d_B;
uint32_t* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(uint32_t*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Reports a failed HIP runtime call (with the call site's file/line, supplied
// by the checkCudaErrors macro) and terminates the process.
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
    if (hipSuccess == err)
        return;
    fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
            file, line, (int)err, hipGetErrorString( err ) );
    exit(-1);
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Integer-multiply stress kernel for power measurement.
// Threads whose warp-lane index (i % 32) is below `div` run `iterations`
// rounds of dependent 32-bit multiplies; the remaining lanes skip the loop,
// so `div` selects how many lanes per warp stay busy (an intra-warp
// divergence knob). The final store to C[i] keeps the arithmetic from being
// optimized away.
// NOTE(review): `N` is never used as a bounds guard; this assumes the launch
// covers exactly N = gridDim.x * blockDim.x threads — confirm at call sites.
__global__ void PowerKernal2(const uint32_t* A, const uint32_t* B, uint32_t* C, int N, uint64_t iterations, int div)
{
// Flat 1-D global thread index.
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
uint32_t Value1=1;
uint32_t Value2=A[i];
uint32_t Value3=B[i];
uint32_t Value;
// Loop-invariant copies of the inputs; A[i]/B[i] are also re-read inside the
// loop so a memory operand stays in the instruction mix.
uint32_t I1=A[i];
uint32_t I2=B[i];
// Only lanes below `div` execute the hot loop (multiplies, despite the old
// "Excessive Addition access" comment).
// if(((i%32)<=31))
if((i%32)<div){
// Unroll aggressively so the body is dominated by back-to-back MULs.
#pragma unroll 100
for(uint64_t k=0; k<iterations;k++) {
// Chain of data-dependent multiplies; the exact statement order IS the
// workload and must not be reassociated or simplified.
Value1=I1*A[i];
Value3=I2*B[i];
Value1*=Value2;
Value1*=Value2;
Value2=Value3*Value1;
Value1=Value2*Value3;
}
}
// Block-wide barrier before publishing results.
__syncthreads();
Value=Value1;
C[i]=Value*Value2;
}
// Entry point: parses <#iterations> <#blocks> <div>, builds random inputs,
// runs the PowerKernal2 stress kernel, and copies the result back.
int main(int argc, char** argv)
{
    uint64_t iterations;
    unsigned blocks;
    int div;
    if (argc != 4){
        // Fixed: the previous usage line named only two of the three
        // mandatory arguments.
        fprintf(stderr,"usage: %s #iterations #blocks #div\n",argv[0]);
        exit(1);
    }
    else {
        iterations = atoll(argv[1]);
        blocks = atoi(argv[2]);
        div = atoi(argv[3]);
    }
    // uint64_t may be unsigned long on LP64; cast so %llu is well-defined.
    printf("Power Microbenchmarks with %llu iterations \n", (unsigned long long)iterations);
    int N = THREADS_PER_BLOCK*blocks;
    size_t size = N * sizeof(uint32_t);
    // Allocate host buffers; on failure free what exists and stop — the old
    // code continued after CleanupResources() and crashed in RandomInit().
    h_A = (uint32_t*)malloc(size);
    if (h_A == 0) { CleanupResources(); exit(1); }
    h_B = (uint32_t*)malloc(size);
    if (h_B == 0) { CleanupResources(); exit(1); }
    h_C = (uint32_t*)malloc(size);
    if (h_C == 0) { CleanupResources(); exit(1); }
    // Initialize input vectors
    RandomInit(h_A, N);
    RandomInit(h_B, N);
    // Allocate vectors in device memory
    printf("before\n");
    checkCudaErrors( hipMalloc((void**)&d_A, size) );
    checkCudaErrors( hipMalloc((void**)&d_B, size) );
    checkCudaErrors( hipMalloc((void**)&d_C, size) );
    printf("after\n");
    // Copy vectors from host memory to device memory
    checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
    checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
    // Launch configuration: one thread per array element.
    dim3 dimGrid(blocks,1);
    dim3 dimBlock(THREADS_PER_BLOCK,1);
    PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N, iterations,div);
    hipDeviceSynchronize();
    getLastCudaError("kernel launch failure");
    hipDeviceSynchronize();
#ifdef _DEBUG
    checkCudaErrors( hipDeviceSynchronize() );
#endif
    // Copy result from device memory to host memory
    // h_C contains the result in host memory
    checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
    CleanupResources();
    return 0;
}
// Frees all global device buffers, then all global host buffers.
// Each pointer is checked first, so the function tolerates being invoked
// before (or during) allocation.
void CleanupResources(void)
{
    if (d_A)
        hipFree(d_A);
    if (d_B)
        hipFree(d_B);
    if (d_C)
        hipFree(d_C);

    if (h_A)
        free(h_A);
    if (h_B)
        free(h_B);
    if (h_C)
        free(h_C);
}
// Fills data[0..n) with pseudo-random uint32_t entries.
// Fix: the previous body computed rand() / RAND_MAX with INTEGER division,
// which is 0 for every rand() result except RAND_MAX itself, so the array
// ended up (almost) all zeros — defeating the stated purpose. Store the raw
// rand() value instead so entries really vary.
void RandomInit(uint32_t* data, int n)
{
	for (int i = 0; i < n; ++i){
		data[i] = (uint32_t)rand();
	}
}
.file "INT_MUL_l1.hip"
# Host-side launch stub for the kernel PowerKernal2(const uint*, const uint*,
# uint*, int, <64-bit>, int). It spills the six SysV argument registers to the
# stack, builds the pointer array hipLaunchKernel expects, pops the launch
# configuration pushed by __hipPushCallConfiguration, and calls hipLaunchKernel.
	.globl	_Z27__device_stub__PowerKernal2PKjS0_Pjimi # -- Begin function _Z27__device_stub__PowerKernal2PKjS0_Pjimi
	.p2align	4, 0x90
	.type	_Z27__device_stub__PowerKernal2PKjS0_Pjimi,@function
_Z27__device_stub__PowerKernal2PKjS0_Pjimi: # @_Z27__device_stub__PowerKernal2PKjS0_Pjimi
	.cfi_startproc
# %bb.0:
	subq	$152, %rsp
	.cfi_def_cfa_offset 160
# Spill the six incoming arguments (rdi, rsi, rdx, ecx, r8, r9d) to stack slots.
	movq	%rdi, 88(%rsp)
	movq	%rsi, 80(%rsp)
	movq	%rdx, 72(%rsp)
	movl	%ecx, 12(%rsp)
	movq	%r8, 64(%rsp)
	movl	%r9d, 8(%rsp)
# Build the argument-pointer array (one pointer per spilled argument) at 96(%rsp).
	leaq	88(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	80(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 112(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 120(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 128(%rsp)
	leaq	8(%rsp), %rax
	movq	%rax, 136(%rsp)
# Retrieve grid dim, block dim, shared-mem size and stream recorded by the
# matching __hipPushCallConfiguration call.
	leaq	48(%rsp), %rdi
	leaq	32(%rsp), %rsi
	leaq	24(%rsp), %rdx
	leaq	16(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi
	movl	56(%rsp), %edx
	movq	32(%rsp), %rcx
	movl	40(%rsp), %r8d
	leaq	96(%rsp), %r9
# hipLaunchKernel(function, gridDim, blockDim, args, sharedMemBytes, stream);
# the last two parameters are passed on the stack via the two pushes below.
	movl	$_Z12PowerKernal2PKjS0_Pjimi, %edi
	pushq	16(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$168, %rsp
	.cfi_adjust_cfa_offset -168
	retq
.Lfunc_end0:
	.size	_Z27__device_stub__PowerKernal2PKjS0_Pjimi, .Lfunc_end0-_Z27__device_stub__PowerKernal2PKjS0_Pjimi
	.cfi_endproc
                                        # -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $168, %rsp
.cfi_def_cfa_offset 224
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
cmpl $4, %edi
jne .LBB1_30
# %bb.1:
movq 8(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtoll
movq %rax, %r14
movq 16(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r12
movq 24(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, 16(%rsp) # 8-byte Spill
movl $.L.str.1, %edi
movq %r14, 24(%rsp) # 8-byte Spill
movq %r14, %rsi
xorl %eax, %eax
callq printf
movl %r12d, %ebp
shll $10, %ebp
movslq %ebp, %rbx
shlq $2, %rbx
movq %rbx, %rdi
callq malloc
movq %rax, h_A(%rip)
testq %rax, %rax
jne .LBB1_3
# %bb.2:
callq _Z16CleanupResourcesv
.LBB1_3:
movq %rbx, %rdi
callq malloc
movq %rax, h_B(%rip)
testq %rax, %rax
jne .LBB1_5
# %bb.4:
callq _Z16CleanupResourcesv
.LBB1_5:
movq %rbx, %rdi
callq malloc
movq %rax, h_C(%rip)
testq %rax, %rax
jne .LBB1_7
# %bb.6:
callq _Z16CleanupResourcesv
.LBB1_7:
movl %ebp, %r13d
testl %ebp, %ebp
jle .LBB1_10
# %bb.8: # %.lr.ph.preheader.i
movq h_A(%rip), %r15
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB1_9: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
cltq
movq %rax, %rcx
shlq $30, %rcx
addq %rax, %rcx
movq %rcx, %rax
shrq $63, %rax
sarq $61, %rcx
addl %eax, %ecx
movl %ecx, (%r15,%r14,4)
incq %r14
cmpq %r14, %r13
jne .LBB1_9
.LBB1_10: # %_Z10RandomInitPji.exit
testl %ebp, %ebp
jle .LBB1_13
# %bb.11: # %.lr.ph.preheader.i30
movq h_B(%rip), %r15
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB1_12: # %.lr.ph.i32
# =>This Inner Loop Header: Depth=1
callq rand
cltq
movq %rax, %rcx
shlq $30, %rcx
addq %rax, %rcx
movq %rcx, %rax
shrq $63, %rax
sarq $61, %rcx
addl %eax, %ecx
movl %ecx, (%r15,%r14,4)
incq %r14
cmpq %r14, %r13
jne .LBB1_12
.LBB1_13: # %_Z10RandomInitPji.exit36
movl $.Lstr, %edi
callq puts@PLT
movl $d_A, %edi
movq %rbx, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB1_14
# %bb.16: # %_Z17__checkCudaErrors10hipError_tPKci.exit
movl $d_B, %edi
movq %rbx, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB1_17
# %bb.18: # %_Z17__checkCudaErrors10hipError_tPKci.exit38
movl $d_C, %edi
movq %rbx, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB1_19
# %bb.20: # %_Z17__checkCudaErrors10hipError_tPKci.exit40
movl $.Lstr.1, %edi
callq puts@PLT
movq d_A(%rip), %rdi
movq h_A(%rip), %rsi
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_21
# %bb.22: # %_Z17__checkCudaErrors10hipError_tPKci.exit42
movq d_B(%rip), %rdi
movq h_B(%rip), %rsi
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_23
# %bb.24: # %_Z17__checkCudaErrors10hipError_tPKci.exit44
movl %r12d, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $1024, %rdx # imm = 0x400
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_26
# %bb.25:
movq d_A(%rip), %rax
movq d_B(%rip), %rcx
movq d_C(%rip), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl %ebp, 12(%rsp)
movq 24(%rsp), %rax # 8-byte Reload
movq %rax, 80(%rsp)
movq 16(%rsp), %rax # 8-byte Reload
movl %eax, 8(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 80(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z12PowerKernal2PKjS0_Pjimi, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_26:
callq hipDeviceSynchronize
callq hipGetLastError
testl %eax, %eax
jne .LBB1_31
# %bb.27: # %_Z18__getLastCudaErrorPKcS0_i.exit
callq hipDeviceSynchronize
movq h_C(%rip), %rdi
movq d_C(%rip), %rsi
movq %rbx, %rdx
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_28
# %bb.29: # %_Z17__checkCudaErrors10hipError_tPKci.exit47
callq _Z16CleanupResourcesv
xorl %eax, %eax
addq $168, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_30:
.cfi_def_cfa_offset 224
movq stderr(%rip), %rdi
movq (%rbx), %rdx
movl $.L.str, %esi
xorl %eax, %eax
callq fprintf
movl $1, %edi
callq exit
.LBB1_14:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.6, %esi
movl $.L.str.3, %edx
movq %rbx, %rdi
movl $122, %ecx
jmp .LBB1_15
.LBB1_17:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.6, %esi
movl $.L.str.3, %edx
movq %rbx, %rdi
movl $123, %ecx
jmp .LBB1_15
.LBB1_19:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.6, %esi
movl $.L.str.3, %edx
movq %rbx, %rdi
movl $124, %ecx
jmp .LBB1_15
.LBB1_21:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.6, %esi
movl $.L.str.3, %edx
movq %rbx, %rdi
movl $128, %ecx
jmp .LBB1_15
.LBB1_23:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.6, %esi
movl $.L.str.3, %edx
movq %rbx, %rdi
movl $129, %ecx
jmp .LBB1_15
.LBB1_31:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movq %rax, %r10
subq $8, %rsp
.cfi_adjust_cfa_offset 8
movl $.L.str.7, %esi
movl $.L.str.3, %edx
movl $.L.str.5, %r8d
movq %rbx, %rdi
movl $143, %ecx
movl %ebp, %r9d
xorl %eax, %eax
pushq %r10
.cfi_adjust_cfa_offset 8
callq fprintf
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $-1, %edi
callq exit
.LBB1_28:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.6, %esi
movl $.L.str.3, %edx
movq %rbx, %rdi
movl $152, %ecx
.LBB1_15:
movl %ebp, %r8d
movq %rax, %r9
xorl %eax, %eax
callq fprintf
movl $-1, %edi
callq exit
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
# CleanupResources(): frees the device buffers d_A/d_B/d_C via hipFree and the
# host buffers h_A/h_B/h_C via free, skipping any pointer that is still null.
# The final free of h_C is performed as a tail call.
	.globl	_Z16CleanupResourcesv           # -- Begin function _Z16CleanupResourcesv
	.p2align	4, 0x90
	.type	_Z16CleanupResourcesv,@function
_Z16CleanupResourcesv:                  # @_Z16CleanupResourcesv
	.cfi_startproc
# %bb.0:
	pushq	%rax
	.cfi_def_cfa_offset 16
# if (d_A) hipFree(d_A);
	movq	d_A(%rip), %rdi
	testq	%rdi, %rdi
	je	.LBB2_2
# %bb.1:
	callq	hipFree
.LBB2_2:
# if (d_B) hipFree(d_B);
	movq	d_B(%rip), %rdi
	testq	%rdi, %rdi
	je	.LBB2_4
# %bb.3:
	callq	hipFree
.LBB2_4:
# if (d_C) hipFree(d_C);
	movq	d_C(%rip), %rdi
	testq	%rdi, %rdi
	je	.LBB2_6
# %bb.5:
	callq	hipFree
.LBB2_6:
# if (h_A) free(h_A);
	movq	h_A(%rip), %rdi
	testq	%rdi, %rdi
	je	.LBB2_8
# %bb.7:
	callq	free
.LBB2_8:
# if (h_B) free(h_B);
	movq	h_B(%rip), %rdi
	testq	%rdi, %rdi
	je	.LBB2_10
# %bb.9:
	callq	free
.LBB2_10:
# if (h_C) free(h_C); — done as a tail call after restoring the stack.
	movq	h_C(%rip), %rdi
	testq	%rdi, %rdi
	je	.LBB2_11
# %bb.12:
	popq	%rax
	.cfi_def_cfa_offset 8
	jmp	free                            # TAILCALL
.LBB2_11:
	.cfi_def_cfa_offset 16
	popq	%rax
	.cfi_def_cfa_offset 8
	retq
.Lfunc_end2:
	.size	_Z16CleanupResourcesv, .Lfunc_end2-_Z16CleanupResourcesv
	.cfi_endproc
                                        # -- End function
# RandomInit(uint32_t* data, int n): stores rand() / RAND_MAX into data[i]
# for i in [0, n). Returns immediately when n <= 0.
	.globl	_Z10RandomInitPji               # -- Begin function _Z10RandomInitPji
	.p2align	4, 0x90
	.type	_Z10RandomInitPji,@function
_Z10RandomInitPji:                      # @_Z10RandomInitPji
	.cfi_startproc
# %bb.0:
	testl	%esi, %esi
	jle	.LBB3_4
# %bb.1: # %.lr.ph.preheader
	pushq	%r15
	.cfi_def_cfa_offset 16
	pushq	%r14
	.cfi_def_cfa_offset 24
	pushq	%rbx
	.cfi_def_cfa_offset 32
	.cfi_offset %rbx, -32
	.cfi_offset %r14, -24
	.cfi_offset %r15, -16
# rbx = data, r14 = n (zero-extended trip count), r15 = loop index i.
	movq	%rdi, %rbx
	movl	%esi, %r14d
	xorl	%r15d, %r15d
	.p2align	4, 0x90
.LBB3_2: # %.lr.ph
	# =>This Inner Loop Header: Depth=1
	callq	rand
# Multiply-and-shift strength reduction of the signed division
# rand() / RAND_MAX (x * (2^30 + 1) >> 61, plus sign correction).
	cltq
	movq	%rax, %rcx
	shlq	$30, %rcx
	addq	%rax, %rcx
	movq	%rcx, %rax
	shrq	$63, %rax
	sarq	$61, %rcx
	addl	%eax, %ecx
	movl	%ecx, (%rbx,%r15,4)
	incq	%r15
	cmpq	%r15, %r14
	jne	.LBB3_2
# %bb.3:
	popq	%rbx
	.cfi_def_cfa_offset 24
	popq	%r14
	.cfi_def_cfa_offset 16
	popq	%r15
	.cfi_def_cfa_offset 8
	.cfi_restore %rbx
	.cfi_restore %r14
	.cfi_restore %r15
.LBB3_4: # %._crit_edge
	retq
.Lfunc_end3:
	.size	_Z10RandomInitPji, .Lfunc_end3-_Z10RandomInitPji
	.cfi_endproc
                                        # -- End function
# __hip_module_ctor: run from .init_array at program startup. Registers the
# embedded fat binary exactly once (guarded by the cached __hip_gpubin_handle),
# registers the PowerKernal2 kernel with the HIP runtime, and arranges for
# __hip_module_dtor to run at exit via atexit.
	.p2align	4, 0x90                         # -- Begin function __hip_module_ctor
	.type	__hip_module_ctor,@function
__hip_module_ctor:                      # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	subq	$40, %rsp
	.cfi_def_cfa_offset 48
# Only register the fat binary on the first invocation.
	cmpq	$0, __hip_gpubin_handle(%rip)
	jne	.LBB4_2
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip)
.LBB4_2:
# Register the kernel entry under its mangled name.
	movq	__hip_gpubin_handle(%rip), %rdi
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z12PowerKernal2PKjS0_Pjimi, %esi
	movl	$.L__unnamed_1, %edx
	movl	$.L__unnamed_1, %ecx
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
# Tail-call atexit(__hip_module_dtor).
	movl	$__hip_module_dtor, %edi
	addq	$40, %rsp
	.cfi_def_cfa_offset 8
	jmp	atexit                          # TAILCALL
.Lfunc_end4:
	.size	__hip_module_ctor, .Lfunc_end4-__hip_module_ctor
	.cfi_endproc
                                        # -- End function
# __hip_module_dtor: registered via atexit by __hip_module_ctor. If the fat
# binary was registered, unregisters it and clears the cached handle so the
# teardown is idempotent.
	.p2align	4, 0x90                         # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
__hip_module_dtor:                      # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi
	je	.LBB5_2
# %bb.1:
	pushq	%rax
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB5_2:
	retq
.Lfunc_end5:
	.size	__hip_module_dtor, .Lfunc_end5-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
.type h_A,@object # @h_A
.bss
.globl h_A
.p2align 3, 0x0
h_A:
.quad 0
.size h_A, 8
.type h_B,@object # @h_B
.globl h_B
.p2align 3, 0x0
h_B:
.quad 0
.size h_B, 8
.type h_C,@object # @h_C
.globl h_C
.p2align 3, 0x0
h_C:
.quad 0
.size h_C, 8
.type d_A,@object # @d_A
.globl d_A
.p2align 3, 0x0
d_A:
.quad 0
.size d_A, 8
.type d_B,@object # @d_B
.globl d_B
.p2align 3, 0x0
d_B:
.quad 0
.size d_B, 8
.type d_C,@object # @d_C
.globl d_C
.p2align 3, 0x0
d_C:
.quad 0
.size d_C, 8
.type noprompt,@object # @noprompt
.globl noprompt
noprompt:
.byte 0 # 0x0
.size noprompt, 1
.type my_timer,@object # @my_timer
.globl my_timer
.p2align 2, 0x0
my_timer:
.long 0 # 0x0
.size my_timer, 4
.type _Z12PowerKernal2PKjS0_Pjimi,@object # @_Z12PowerKernal2PKjS0_Pjimi
.section .rodata,"a",@progbits
.globl _Z12PowerKernal2PKjS0_Pjimi
.p2align 3, 0x0
_Z12PowerKernal2PKjS0_Pjimi:
.quad _Z27__device_stub__PowerKernal2PKjS0_Pjimi
.size _Z12PowerKernal2PKjS0_Pjimi, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "usage: %s #iterations #cores\n"
.size .L.str, 30
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Power Microbenchmarks with %llu iterations \n"
.size .L.str.1, 45
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/speverel/gpgpu-sim_simulations/master/benchmarks/src/cuda/gpuwattch-ubench/static_power_modeling/ACT_CORE2/INT_MUL_l1.hip"
.size .L.str.3, 179
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "kernel launch failure"
.size .L.str.5, 22
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "%s(%i) : CUDA Runtime API error %d: %s.\n"
.size .L.str.6, 41
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n"
.size .L.str.7, 56
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z12PowerKernal2PKjS0_Pjimi"
.size .L__unnamed_1, 28
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "before"
.size .Lstr, 7
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "after"
.size .Lstr.1, 6
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__PowerKernal2PKjS0_Pjimi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym d_A
.addrsig_sym d_B
.addrsig_sym d_C
.addrsig_sym _Z12PowerKernal2PKjS0_Pjimi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00104a07_00000000-6_INT_MUL_l1.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2064:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2064:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata._Z17__checkCudaErrors9cudaErrorPKci.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "%s(%i) : CUDA Runtime API error %d: %s.\n"
.section .text._Z17__checkCudaErrors9cudaErrorPKci,"axG",@progbits,_Z17__checkCudaErrors9cudaErrorPKci,comdat
.weak _Z17__checkCudaErrors9cudaErrorPKci
.type _Z17__checkCudaErrors9cudaErrorPKci, @function
_Z17__checkCudaErrors9cudaErrorPKci:
.LFB2057:
.cfi_startproc
endbr64
testl %edi, %edi
jne .L8
ret
.L8:
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movl %edi, %ebx
movq %rsi, %rbp
movl %edx, %r12d
call cudaGetErrorString@PLT
subq $8, %rsp
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
movl %ebx, %r9d
movl %r12d, %r8d
movq %rbp, %rcx
leaq .LC0(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $-1, %edi
call exit@PLT
.cfi_endproc
.LFE2057:
.size _Z17__checkCudaErrors9cudaErrorPKci, .-_Z17__checkCudaErrors9cudaErrorPKci
.text
.globl _Z16CleanupResourcesv
.type _Z16CleanupResourcesv, @function
_Z16CleanupResourcesv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq d_A(%rip), %rdi
testq %rdi, %rdi
je .L10
call cudaFree@PLT
.L10:
movq d_B(%rip), %rdi
testq %rdi, %rdi
je .L11
call cudaFree@PLT
.L11:
movq d_C(%rip), %rdi
testq %rdi, %rdi
je .L12
call cudaFree@PLT
.L12:
movq h_A(%rip), %rdi
testq %rdi, %rdi
je .L13
call free@PLT
.L13:
movq h_B(%rip), %rdi
testq %rdi, %rdi
je .L14
call free@PLT
.L14:
movq h_C(%rip), %rdi
testq %rdi, %rdi
je .L9
call free@PLT
.L9:
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _Z16CleanupResourcesv, .-_Z16CleanupResourcesv
.globl _Z10RandomInitPji
.type _Z10RandomInitPji, @function
_Z10RandomInitPji:
.LFB2061:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L22
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbx
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %rbp
.L19:
call rand@PLT
movslq %eax, %rcx
movq %rcx, %rdx
salq $30, %rdx
addq %rcx, %rdx
sarq $61, %rdx
sarl $31, %eax
subl %eax, %edx
movl %edx, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L19
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L22:
.cfi_restore 3
.cfi_restore 6
ret
.cfi_endproc
.LFE2061:
.size _Z10RandomInitPji, .-_Z10RandomInitPji
.globl _Z41__device_stub__Z12PowerKernal2PKjS0_PjimiPKjS0_Pjimi
.type _Z41__device_stub__Z12PowerKernal2PKjS0_PjimiPKjS0_Pjimi, @function
_Z41__device_stub__Z12PowerKernal2PKjS0_PjimiPKjS0_Pjimi:
.LFB2086:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movq %r8, 8(%rsp)
movl %r9d, 16(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
leaq 16(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L29
.L25:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L30
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L29:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z12PowerKernal2PKjS0_Pjimi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L25
.L30:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2086:
.size _Z41__device_stub__Z12PowerKernal2PKjS0_PjimiPKjS0_Pjimi, .-_Z41__device_stub__Z12PowerKernal2PKjS0_PjimiPKjS0_Pjimi
.globl _Z12PowerKernal2PKjS0_Pjimi
.type _Z12PowerKernal2PKjS0_Pjimi, @function
_Z12PowerKernal2PKjS0_Pjimi:
.LFB2087:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z41__device_stub__Z12PowerKernal2PKjS0_PjimiPKjS0_Pjimi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _Z12PowerKernal2PKjS0_Pjimi, .-_Z12PowerKernal2PKjS0_Pjimi
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "usage: %s #iterations #cores\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "Power Microbenchmarks with %llu iterations \n"
.section .rodata.str1.1
.LC3:
.string "before\n"
.section .rodata.str1.8
.align 8
.LC4:
.string "/home/ubuntu/Datasets/stackv2/train-structured/speverel/gpgpu-sim_simulations/master/benchmarks/src/cuda/gpuwattch-ubench/static_power_modeling/ACT_CORE2/INT_MUL_l1.cu"
.section .rodata.str1.1
.LC5:
.string "after\n"
.LC6:
.string "kernel launch failure"
.section .rodata.str1.8
.align 8
.LC7:
.string "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n"
.text
.globl main
.type main, @function
main:
.LFB2059:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rsi, %rbx
cmpl $4, %edi
jne .L41
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtoll@PLT
movq %rax, %r14
movq 16(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r12
movq 24(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r15
movq %r14, %rdx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %r12d, %r13d
sall $10, %r13d
movslq %r13d, %rbx
salq $2, %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, h_A(%rip)
testq %rax, %rax
je .L42
.L35:
movq %rbx, %rdi
call malloc@PLT
movq %rax, h_B(%rip)
testq %rax, %rax
je .L43
.L36:
movq %rbx, %rdi
call malloc@PLT
movq %rax, h_C(%rip)
testq %rax, %rax
je .L44
.L37:
movl %r13d, %esi
movq h_A(%rip), %rdi
call _Z10RandomInitPji
movl %r13d, %esi
movq h_B(%rip), %rdi
call _Z10RandomInitPji
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbx, %rsi
leaq d_A(%rip), %rdi
call cudaMalloc@PLT
movl %eax, %edi
movl $122, %edx
leaq .LC4(%rip), %rbp
movq %rbp, %rsi
call _Z17__checkCudaErrors9cudaErrorPKci
movq %rbx, %rsi
leaq d_B(%rip), %rdi
call cudaMalloc@PLT
movl %eax, %edi
movl $123, %edx
movq %rbp, %rsi
call _Z17__checkCudaErrors9cudaErrorPKci
movq %rbx, %rsi
leaq d_C(%rip), %rdi
call cudaMalloc@PLT
movl %eax, %edi
movl $124, %edx
movq %rbp, %rsi
call _Z17__checkCudaErrors9cudaErrorPKci
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %ecx
movq %rbx, %rdx
movq h_A(%rip), %rsi
movq d_A(%rip), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $128, %edx
movq %rbp, %rsi
call _Z17__checkCudaErrors9cudaErrorPKci
movl $1, %ecx
movq %rbx, %rdx
movq h_B(%rip), %rsi
movq d_B(%rip), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $129, %edx
movq %rbp, %rsi
call _Z17__checkCudaErrors9cudaErrorPKci
movl %r12d, 8(%rsp)
movl $1, 12(%rsp)
movl $1024, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L45
.L38:
call cudaThreadSynchronize@PLT
call cudaGetLastError@PLT
movl %eax, %ebp
testl %eax, %eax
jne .L46
call cudaThreadSynchronize@PLT
movl $2, %ecx
movq %rbx, %rdx
movq d_C(%rip), %rsi
movq h_C(%rip), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $152, %edx
leaq .LC4(%rip), %rsi
call _Z17__checkCudaErrors9cudaErrorPKci
call _Z16CleanupResourcesv
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L41:
.cfi_restore_state
movq (%rsi), %rcx
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L42:
call _Z16CleanupResourcesv
jmp .L35
.L43:
call _Z16CleanupResourcesv
jmp .L36
.L44:
call _Z16CleanupResourcesv
jmp .L37
.L45:
movl %r15d, %r9d
movq %r14, %r8
movl %r13d, %ecx
movq d_C(%rip), %rdx
movq d_B(%rip), %rsi
movq d_A(%rip), %rdi
call _Z41__device_stub__Z12PowerKernal2PKjS0_PjimiPKjS0_Pjimi
jmp .L38
.L46:
movl %eax, %edi
call cudaGetErrorString@PLT
pushq %rax
.cfi_def_cfa_offset 104
pushq %rbp
.cfi_def_cfa_offset 112
leaq .LC6(%rip), %r9
movl $143, %r8d
leaq .LC4(%rip), %rcx
leaq .LC7(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $-1, %edi
call exit@PLT
.cfi_endproc
.LFE2059:
.size main, .-main
.section .rodata.str1.1
.LC8:
.string "_Z12PowerKernal2PKjS0_Pjimi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2089:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z12PowerKernal2PKjS0_Pjimi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2089:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl my_timer
.bss
.align 4
.type my_timer, @object
.size my_timer, 4
my_timer:
.zero 4
.globl noprompt
.type noprompt, @object
.size noprompt, 1
noprompt:
.zero 1
.globl d_C
.align 8
.type d_C, @object
.size d_C, 8
d_C:
.zero 8
.globl d_B
.align 8
.type d_B, @object
.size d_B, 8
d_B:
.zero 8
.globl d_A
.align 8
.type d_A, @object
.size d_A, 8
d_A:
.zero 8
.globl h_C
.align 8
.type h_C, @object
.size h_C, 8
h_C:
.zero 8
.globl h_B
.align 8
.type h_B, @object
.size h_B, 8
h_B:
.zero 8
.globl h_A
.align 8
.type h_A, @object
.size h_A, 8
h_A:
.zero 8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "INT_MUL_l1.hip"
.globl _Z27__device_stub__PowerKernal2PKjS0_Pjimi # -- Begin function _Z27__device_stub__PowerKernal2PKjS0_Pjimi
.p2align 4, 0x90
.type _Z27__device_stub__PowerKernal2PKjS0_Pjimi,@function
_Z27__device_stub__PowerKernal2PKjS0_Pjimi: # @_Z27__device_stub__PowerKernal2PKjS0_Pjimi
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 12(%rsp)
movq %r8, 64(%rsp)
movl %r9d, 8(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 12(%rsp), %rax
movq %rax, 120(%rsp)
leaq 64(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z12PowerKernal2PKjS0_Pjimi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z27__device_stub__PowerKernal2PKjS0_Pjimi, .Lfunc_end0-_Z27__device_stub__PowerKernal2PKjS0_Pjimi
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $168, %rsp
.cfi_def_cfa_offset 224
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
cmpl $4, %edi
jne .LBB1_30
# %bb.1:
movq 8(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtoll
movq %rax, %r14
movq 16(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r12
movq 24(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, 16(%rsp) # 8-byte Spill
movl $.L.str.1, %edi
movq %r14, 24(%rsp) # 8-byte Spill
movq %r14, %rsi
xorl %eax, %eax
callq printf
movl %r12d, %ebp
shll $10, %ebp
movslq %ebp, %rbx
shlq $2, %rbx
movq %rbx, %rdi
callq malloc
movq %rax, h_A(%rip)
testq %rax, %rax
jne .LBB1_3
# %bb.2:
callq _Z16CleanupResourcesv
.LBB1_3:
movq %rbx, %rdi
callq malloc
movq %rax, h_B(%rip)
testq %rax, %rax
jne .LBB1_5
# %bb.4:
callq _Z16CleanupResourcesv
.LBB1_5:
movq %rbx, %rdi
callq malloc
movq %rax, h_C(%rip)
testq %rax, %rax
jne .LBB1_7
# %bb.6:
callq _Z16CleanupResourcesv
.LBB1_7:
movl %ebp, %r13d
testl %ebp, %ebp
jle .LBB1_10
# %bb.8: # %.lr.ph.preheader.i
movq h_A(%rip), %r15
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB1_9: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
cltq
movq %rax, %rcx
shlq $30, %rcx
addq %rax, %rcx
movq %rcx, %rax
shrq $63, %rax
sarq $61, %rcx
addl %eax, %ecx
movl %ecx, (%r15,%r14,4)
incq %r14
cmpq %r14, %r13
jne .LBB1_9
.LBB1_10: # %_Z10RandomInitPji.exit
testl %ebp, %ebp
jle .LBB1_13
# %bb.11: # %.lr.ph.preheader.i30
movq h_B(%rip), %r15
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB1_12: # %.lr.ph.i32
# =>This Inner Loop Header: Depth=1
callq rand
cltq
movq %rax, %rcx
shlq $30, %rcx
addq %rax, %rcx
movq %rcx, %rax
shrq $63, %rax
sarq $61, %rcx
addl %eax, %ecx
movl %ecx, (%r15,%r14,4)
incq %r14
cmpq %r14, %r13
jne .LBB1_12
.LBB1_13: # %_Z10RandomInitPji.exit36
movl $.Lstr, %edi
callq puts@PLT
movl $d_A, %edi
movq %rbx, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB1_14
# %bb.16: # %_Z17__checkCudaErrors10hipError_tPKci.exit
movl $d_B, %edi
movq %rbx, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB1_17
# %bb.18: # %_Z17__checkCudaErrors10hipError_tPKci.exit38
movl $d_C, %edi
movq %rbx, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB1_19
# %bb.20: # %_Z17__checkCudaErrors10hipError_tPKci.exit40
movl $.Lstr.1, %edi
callq puts@PLT
movq d_A(%rip), %rdi
movq h_A(%rip), %rsi
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_21
# %bb.22: # %_Z17__checkCudaErrors10hipError_tPKci.exit42
movq d_B(%rip), %rdi
movq h_B(%rip), %rsi
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_23
# %bb.24: # %_Z17__checkCudaErrors10hipError_tPKci.exit44
movl %r12d, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $1024, %rdx # imm = 0x400
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_26
# %bb.25:
movq d_A(%rip), %rax
movq d_B(%rip), %rcx
movq d_C(%rip), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl %ebp, 12(%rsp)
movq 24(%rsp), %rax # 8-byte Reload
movq %rax, 80(%rsp)
movq 16(%rsp), %rax # 8-byte Reload
movl %eax, 8(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 80(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z12PowerKernal2PKjS0_Pjimi, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_26:
callq hipDeviceSynchronize
callq hipGetLastError
testl %eax, %eax
jne .LBB1_31
# %bb.27: # %_Z18__getLastCudaErrorPKcS0_i.exit
callq hipDeviceSynchronize
movq h_C(%rip), %rdi
movq d_C(%rip), %rsi
movq %rbx, %rdx
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_28
# %bb.29: # %_Z17__checkCudaErrors10hipError_tPKci.exit47
callq _Z16CleanupResourcesv
xorl %eax, %eax
addq $168, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_30:
.cfi_def_cfa_offset 224
movq stderr(%rip), %rdi
movq (%rbx), %rdx
movl $.L.str, %esi
xorl %eax, %eax
callq fprintf
movl $1, %edi
callq exit
.LBB1_14:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.6, %esi
movl $.L.str.3, %edx
movq %rbx, %rdi
movl $122, %ecx
jmp .LBB1_15
.LBB1_17:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.6, %esi
movl $.L.str.3, %edx
movq %rbx, %rdi
movl $123, %ecx
jmp .LBB1_15
.LBB1_19:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.6, %esi
movl $.L.str.3, %edx
movq %rbx, %rdi
movl $124, %ecx
jmp .LBB1_15
.LBB1_21:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.6, %esi
movl $.L.str.3, %edx
movq %rbx, %rdi
movl $128, %ecx
jmp .LBB1_15
.LBB1_23:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.6, %esi
movl $.L.str.3, %edx
movq %rbx, %rdi
movl $129, %ecx
jmp .LBB1_15
.LBB1_31:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movq %rax, %r10
subq $8, %rsp
.cfi_adjust_cfa_offset 8
movl $.L.str.7, %esi
movl $.L.str.3, %edx
movl $.L.str.5, %r8d
movq %rbx, %rdi
movl $143, %ecx
movl %ebp, %r9d
xorl %eax, %eax
pushq %r10
.cfi_adjust_cfa_offset 8
callq fprintf
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $-1, %edi
callq exit
.LBB1_28:
movq stderr(%rip), %rbx
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
movl $.L.str.6, %esi
movl $.L.str.3, %edx
movq %rbx, %rdi
movl $152, %ecx
.LBB1_15:
movl %ebp, %r8d
movq %rax, %r9
xorl %eax, %eax
callq fprintf
movl $-1, %edi
callq exit
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.globl _Z16CleanupResourcesv # -- Begin function _Z16CleanupResourcesv
.p2align 4, 0x90
.type _Z16CleanupResourcesv,@function
_Z16CleanupResourcesv: # @_Z16CleanupResourcesv
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
movq d_A(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
callq hipFree
.LBB2_2:
movq d_B(%rip), %rdi
testq %rdi, %rdi
je .LBB2_4
# %bb.3:
callq hipFree
.LBB2_4:
movq d_C(%rip), %rdi
testq %rdi, %rdi
je .LBB2_6
# %bb.5:
callq hipFree
.LBB2_6:
movq h_A(%rip), %rdi
testq %rdi, %rdi
je .LBB2_8
# %bb.7:
callq free
.LBB2_8:
movq h_B(%rip), %rdi
testq %rdi, %rdi
je .LBB2_10
# %bb.9:
callq free
.LBB2_10:
movq h_C(%rip), %rdi
testq %rdi, %rdi
je .LBB2_11
# %bb.12:
popq %rax
.cfi_def_cfa_offset 8
jmp free # TAILCALL
.LBB2_11:
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z16CleanupResourcesv, .Lfunc_end2-_Z16CleanupResourcesv
.cfi_endproc
# -- End function
.globl _Z10RandomInitPji # -- Begin function _Z10RandomInitPji
.p2align 4, 0x90
.type _Z10RandomInitPji,@function
_Z10RandomInitPji: # @_Z10RandomInitPji
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB3_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl %esi, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltq
movq %rax, %rcx
shlq $30, %rcx
addq %rax, %rcx
movq %rcx, %rax
shrq $63, %rax
sarq $61, %rcx
addl %eax, %ecx
movl %ecx, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB3_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB3_4: # %._crit_edge
retq
.Lfunc_end3:
.size _Z10RandomInitPji, .Lfunc_end3-_Z10RandomInitPji
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12PowerKernal2PKjS0_Pjimi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type h_A,@object # @h_A
.bss
.globl h_A
.p2align 3, 0x0
h_A:
.quad 0
.size h_A, 8
.type h_B,@object # @h_B
.globl h_B
.p2align 3, 0x0
h_B:
.quad 0
.size h_B, 8
.type h_C,@object # @h_C
.globl h_C
.p2align 3, 0x0
h_C:
.quad 0
.size h_C, 8
.type d_A,@object # @d_A
.globl d_A
.p2align 3, 0x0
d_A:
.quad 0
.size d_A, 8
.type d_B,@object # @d_B
.globl d_B
.p2align 3, 0x0
d_B:
.quad 0
.size d_B, 8
.type d_C,@object # @d_C
.globl d_C
.p2align 3, 0x0
d_C:
.quad 0
.size d_C, 8
.type noprompt,@object # @noprompt
.globl noprompt
noprompt:
.byte 0 # 0x0
.size noprompt, 1
.type my_timer,@object # @my_timer
.globl my_timer
.p2align 2, 0x0
my_timer:
.long 0 # 0x0
.size my_timer, 4
.type _Z12PowerKernal2PKjS0_Pjimi,@object # @_Z12PowerKernal2PKjS0_Pjimi
.section .rodata,"a",@progbits
.globl _Z12PowerKernal2PKjS0_Pjimi
.p2align 3, 0x0
_Z12PowerKernal2PKjS0_Pjimi:
.quad _Z27__device_stub__PowerKernal2PKjS0_Pjimi
.size _Z12PowerKernal2PKjS0_Pjimi, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "usage: %s #iterations #cores\n"
.size .L.str, 30
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Power Microbenchmarks with %llu iterations \n"
.size .L.str.1, 45
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/speverel/gpgpu-sim_simulations/master/benchmarks/src/cuda/gpuwattch-ubench/static_power_modeling/ACT_CORE2/INT_MUL_l1.hip"
.size .L.str.3, 179
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "kernel launch failure"
.size .L.str.5, 22
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "%s(%i) : CUDA Runtime API error %d: %s.\n"
.size .L.str.6, 41
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n"
.size .L.str.7, 56
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z12PowerKernal2PKjS0_Pjimi"
.size .L__unnamed_1, 28
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "before"
.size .Lstr, 7
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "after"
.size .Lstr.1, 6
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__PowerKernal2PKjS0_Pjimi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym d_A
.addrsig_sym d_B
.addrsig_sym d_C
.addrsig_sym _Z12PowerKernal2PKjS0_Pjimi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <cuda_fp16.h>
#include <iostream>
#include <cmath>
#include <cfloat>
#include <math.h>
#include <algorithm>
using namespace std;
// nvcc -ccbin clang++-3.8 cufp16_test.cu
// NOTE: didn't get any error like building ArrayFire
int main()
{
cout << isinf(NAN) << endl; // 0
cout << isinf(INFINITY) << endl; // 1
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cuda_fp16.h>
#include <iostream>
#include <cmath>
#include <cfloat>
#include <math.h>
#include <algorithm>
using namespace std;
// nvcc -ccbin clang++-3.8 cufp16_test.cu
// NOTE: didn't get any error like building ArrayFire
int main()
{
cout << isinf(NAN) << endl; // 0
cout << isinf(INFINITY) << endl; // 1
} | .file "tmpxft_001aa2f6_00000000-6_cufp16_test.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB4300:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4300:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl main
.type main, @function
main:
.LFB4297:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movl $0, %esi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZNSo9_M_insertIbEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $1, %esi
movq %rbx, %rdi
call _ZNSo9_M_insertIbEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $0, %eax
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4297:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB4323:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4323:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cuda_fp16.h>
#include <iostream>
#include <cmath>
#include <cfloat>
#include <math.h>
#include <algorithm>
using namespace std;
// nvcc -ccbin clang++-3.8 cufp16_test.cu
// NOTE: didn't get any error like building ArrayFire
int main()
{
cout << isinf(NAN) << endl; // 0
cout << isinf(INFINITY) << endl; // 1
} | #include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <iostream>
#include <cmath>
#include <cfloat>
#include <math.h>
#include <algorithm>
using namespace std;
// nvcc -ccbin clang++-3.8 cufp16_test.cu
// NOTE: didn't get any error like building ArrayFire
int main()
{
cout << isinf(NAN) << endl; // 0
cout << isinf(INFINITY) << endl; // 1
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <iostream>
#include <cmath>
#include <cfloat>
#include <math.h>
#include <algorithm>
using namespace std;
// nvcc -ccbin clang++-3.8 cufp16_test.cu
// NOTE: didn't get any error like building ArrayFire
int main()
{
cout << isinf(NAN) << endl; // 0
cout << isinf(INFINITY) << endl; // 1
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <iostream>
#include <cmath>
#include <cfloat>
#include <math.h>
#include <algorithm>
using namespace std;
// nvcc -ccbin clang++-3.8 cufp16_test.cu
// NOTE: didn't get any error like building ArrayFire
int main()
{
cout << isinf(NAN) << endl; // 0
cout << isinf(INFINITY) << endl; // 1
} | .text
.file "cufp16_test.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
pushq %rax
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movl $_ZSt4cout, %edi
xorl %esi, %esi
callq _ZNSo9_M_insertIbEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB0_9
# %bb.1: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%rbx)
je .LBB0_3
# %bb.2:
movzbl 67(%rbx), %ecx
jmp .LBB0_4
.LBB0_3:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB0_4: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $1, %esi
callq _ZNSo9_M_insertIbEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB0_9
# %bb.5: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i2
cmpb $0, 56(%rbx)
je .LBB0_7
# %bb.6:
movzbl 67(%rbx), %ecx
jmp .LBB0_8
.LBB0_7:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB0_8: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit5
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
xorl %eax, %eax
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.LBB0_9:
.cfi_def_cfa_offset 32
callq _ZSt16__throw_bad_castv
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001aa2f6_00000000-6_cufp16_test.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB4300:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4300:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl main
.type main, @function
main:
.LFB4297:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movl $0, %esi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZNSo9_M_insertIbEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $1, %esi
movq %rbx, %rdi
call _ZNSo9_M_insertIbEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $0, %eax
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4297:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB4323:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4323:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cufp16_test.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
pushq %rax
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movl $_ZSt4cout, %edi
xorl %esi, %esi
callq _ZNSo9_M_insertIbEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB0_9
# %bb.1: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%rbx)
je .LBB0_3
# %bb.2:
movzbl 67(%rbx), %ecx
jmp .LBB0_4
.LBB0_3:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB0_4: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $1, %esi
callq _ZNSo9_M_insertIbEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB0_9
# %bb.5: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i2
cmpb $0, 56(%rbx)
je .LBB0_7
# %bb.6:
movzbl 67(%rbx), %ecx
jmp .LBB0_8
.LBB0_7:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB0_8: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit5
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
xorl %eax, %eax
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.LBB0_9:
.cfi_def_cfa_offset 32
callq _ZSt16__throw_bad_castv
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /*******************************************************************************
* serveral useful gpu functions will be defined in this file to facilitate
* the surface redistance scheme
******************************************************************************/
typedef struct
{
double sR;
double sL;
} double_eno_derivative;
__device__ inline
double max2(double x, double y)
{
return (x<y) ? y : x;
}
__device__ inline
double min2(double x, double y)
{
return (x<y) ? x : y;
}
__device__ inline
double min_mod(double x, double y)
{
return (x*y<0) ? 0.0 : (fabs(x)<fabs(y) ? x : y);
}
__device__ inline
double sign(double x)
{
return (x>0) ? 1.0 : -1.0;
}
__device__ inline
bool same_sign(double x, double y)
{
return (x*y>0) || (x==0 && y==0);
}
__device__ inline
void advection_velocity(double & H1, double & H2, double & H3, double sign, double Dx, double Dy, double Dz, double nx, double ny, double nz)
{
double normal_d = nx * Dx + ny + Dy + nz * Dz;
H1 = sign * (Dx - nx * normal_d);
H2 = sign * (Dy - ny * normal_d);
H3 = sign * (Dz - nz * normal_d);
double H_mag = sqrt(H1*H1+H2*H2+H3*H3+1e-6);
H1 = H1/H_mag;
H2 = H2/H_mag;
H3 = H3/H_mag;
}
// convert subindex to linear index
// periodic boundary conditions are assumed
__device__ inline
int sub2ind(int row_idx, int col_idx, int pge_idx, int rows, int cols, int pges)
{
int row_idxn = min2(rows-1, max2(0, row_idx));
int col_idxn = min2(cols-1, max2(0, col_idx));
int pge_idxn = min2(pges-1, max2(0, pge_idx));
int ind = pge_idxn * rows * cols + col_idxn * rows + row_idxn;
return ind;
}
/******************************************************************************
* calculate Eno derivatives at node v0: [v4,v1,v0,v2,v3]
******************************************************************************/
__device__ inline
double_eno_derivative eno_derivative( double v4, double v1, double v0, double v2, double v3, double pr, double pl, double ds)
{
double p2m;
double_eno_derivative eno_d;
double p2 = v1 - 2.0 * v0 + v2;
double p2r = v0 - 2.0 * v2 + v3;
p2m = 0.5 * min_mod(p2, p2r) / pow(ds, 2);
double vr = (pr==ds) ? v2 : 0;
eno_d.sR = (vr - v0) / pr - pr * p2m;
double p2l = v0 - 2.0 * v1 + v4;
p2m = 0.5 * min_mod(p2, p2l) / pow(ds, 2);
double vl = (pl==ds) ? v1 : 0;
eno_d.sL = (v0 - vl) / pl + pl * p2m;
return eno_d;
}
// calculate surface redistance step
// now lsf represents the auxilary level set function(not the level set function)
// inputs : the auxilary level set function, sign of the initial level set function, distance to the interface, normal vectors
__global__
void surface_redistance_step(double * step, double const * lsf, double const * sign, double const * deltat, double const * nx, double const * ny, double const * nz, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
int right = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int right2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
int left = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
double_eno_derivative eno_dx = eno_derivative( lsf[left2], lsf[left], lsf[ind], lsf[right], lsf[right2], xpr[ind], xpl[ind], dx);
double Dx[3] = {eno_dx.sR, 0, eno_dx.sL};
int front = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int front2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
int back = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
double_eno_derivative eno_dy = eno_derivative( lsf[back2], lsf[back], lsf[ind], lsf[front], lsf[front2], ypf[ind], ypb[ind], dy);
double Dy[3] = {eno_dy.sR, 0, eno_dy.sL};
int up = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int up2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
int down = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
double_eno_derivative eno_dz = eno_derivative( lsf[down2], lsf[down], lsf[ind], lsf[up], lsf[up2], zpu[ind], zpd[ind], dz);
double Dz[3] = {eno_dz.sR, 0, eno_dz.sL};
//Forward=-1, None=0, BackWard=1
int const choice_x[26] = {-1,-1,-1,-1, 1, 1, 1, 1, 0, 0, 0, 0,-1,-1, 1, 1,-1,-1, 1, 1, -1, 1, 0, 0, 0, 0};
int const choice_y[26] = {-1,-1, 1, 1,-1,-1, 1, 1, -1,-1, 1, 1, 0, 0, 0, 0,-1, 1,-1, 1, 0, 0,-1, 1, 0, 0};
int const choice_z[26] = {-1, 1,-1, 1,-1, 1,-1, 1, -1, 1,-1, 1,-1, 1,-1, 1, 0, 0, 0, 0, 0, 0, 0, 0,-1, 1};
double Nx = nx[ind];
double Ny = ny[ind];
double Nz = nz[ind];
double Sign = sign[ind];
double dx_c = (Dx[0] + Dx[2]) / 2;
double dy_c = (Dy[0] + Dy[2]) / 2;
double dz_c = (Dz[0] + Dz[2]) / 2;
double maxH1 = 0;
double maxH2 = 0;
double maxH3 = 0;
for(int i=0;i<26;i++){
double dr_x = Dx[choice_x[i]+1];
double dr_y = Dy[choice_y[i]+1];
double dr_z = Dz[choice_z[i]+1];
double H1, H2, H3; // information propagation direction
advection_velocity(H1,H2,H3,Sign,dr_x,dr_y,dr_z,Nx,Ny,Nz);
maxH1 = (fabs(H1)>maxH1) ? fabs(H1) : maxH1;
maxH2 = (fabs(H2)>maxH2) ? fabs(H2) : maxH1;
maxH3 = (fabs(H3)>maxH3) ? fabs(H3) : maxH1;
}
double dt = deltat[ind];
//step[ind] = dt*Sign*(sqrt( pow(dx_c*Nz-Nx*dz_c,2)+pow(dy_c*Nx-Ny*dx_c,2)+pow(dz_c*Ny-Nz*dy_c,2) )-1) - 0.5*(Dx[0]-Dx[2]) - 0.5*(Dy[0]-Dy[2]) - 0.5*(Dz[0]-Dz[2]);
step[ind] = dt*Sign*(sqrt( pow(dx_c*Nz-Nx*dz_c,2)+pow(dy_c*Nx-Ny*dx_c,2)+pow(dz_c*Ny-Nz*dy_c,2) )-1) - 0.5*dt*(maxH1*(Dx[0]-Dx[2]) - maxH2*(Dy[0]-Dy[2]) - maxH3*(Dz[0]-Dz[2]));
//step[ind] = deltat[ind]*Sign*(sqrt( pow(dx_c*Nz-Nx*dz_c,2)+pow(dy_c*Nx-Ny*dx_c,2)+pow(dz_c*Ny-Nz*dy_c,2) )-1);
//step[ind] = maxH3;
//step[ind] = Dz[0] - Dz[2];
// step[ind] = Dz[0];
} | .file "tmpxft_0008a03d_00000000-6_enork2_surface_redistance.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2037:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2037:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z86__device_stub__Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddiPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi
.type _Z86__device_stub__Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddiPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi, @function
_Z86__device_stub__Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddiPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi:
.LFB2059:
.cfi_startproc
endbr64
subq $376, %rsp
.cfi_def_cfa_offset 384
movq %rdi, 120(%rsp)
movq %rsi, 112(%rsp)
movq %rdx, 104(%rsp)
movq %rcx, 96(%rsp)
movq %r8, 88(%rsp)
movq %r9, 80(%rsp)
movsd %xmm0, 16(%rsp)
movsd %xmm1, 8(%rsp)
movsd %xmm2, (%rsp)
movq 384(%rsp), %rax
movq %rax, 72(%rsp)
movq 392(%rsp), %rax
movq %rax, 64(%rsp)
movq 400(%rsp), %rax
movq %rax, 56(%rsp)
movq 408(%rsp), %rax
movq %rax, 48(%rsp)
movq 416(%rsp), %rax
movq %rax, 40(%rsp)
movq 424(%rsp), %rax
movq %rax, 32(%rsp)
movq 432(%rsp), %rax
movq %rax, 24(%rsp)
movq %fs:40, %rax
movq %rax, 360(%rsp)
xorl %eax, %eax
leaq 120(%rsp), %rax
movq %rax, 192(%rsp)
leaq 112(%rsp), %rax
movq %rax, 200(%rsp)
leaq 104(%rsp), %rax
movq %rax, 208(%rsp)
leaq 96(%rsp), %rax
movq %rax, 216(%rsp)
leaq 88(%rsp), %rax
movq %rax, 224(%rsp)
leaq 80(%rsp), %rax
movq %rax, 232(%rsp)
leaq 72(%rsp), %rax
movq %rax, 240(%rsp)
leaq 64(%rsp), %rax
movq %rax, 248(%rsp)
leaq 56(%rsp), %rax
movq %rax, 256(%rsp)
leaq 48(%rsp), %rax
movq %rax, 264(%rsp)
leaq 40(%rsp), %rax
movq %rax, 272(%rsp)
leaq 32(%rsp), %rax
movq %rax, 280(%rsp)
leaq 24(%rsp), %rax
movq %rax, 288(%rsp)
leaq 440(%rsp), %rax
movq %rax, 296(%rsp)
leaq 448(%rsp), %rax
movq %rax, 304(%rsp)
leaq 456(%rsp), %rax
movq %rax, 312(%rsp)
leaq 16(%rsp), %rax
movq %rax, 320(%rsp)
leaq 8(%rsp), %rax
movq %rax, 328(%rsp)
movq %rsp, %rax
movq %rax, 336(%rsp)
leaq 464(%rsp), %rax
movq %rax, 344(%rsp)
movl $1, 144(%rsp)
movl $1, 148(%rsp)
movl $1, 152(%rsp)
movl $1, 156(%rsp)
movl $1, 160(%rsp)
movl $1, 164(%rsp)
leaq 136(%rsp), %rcx
leaq 128(%rsp), %rdx
leaq 156(%rsp), %rsi
leaq 144(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 360(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $376, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 136(%rsp)
.cfi_def_cfa_offset 392
pushq 136(%rsp)
.cfi_def_cfa_offset 400
leaq 208(%rsp), %r9
movq 172(%rsp), %rcx
movl 180(%rsp), %r8d
movq 160(%rsp), %rsi
movl 168(%rsp), %edx
leaq _Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 384
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2059:
.size _Z86__device_stub__Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddiPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi, .-_Z86__device_stub__Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddiPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi
.globl _Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi
.type _Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi, @function
_Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi:
.LFB2060:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 104(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
movl 104(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
movl 104(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 48
movl 104(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 56
pushq 104(%rsp)
.cfi_def_cfa_offset 64
pushq 104(%rsp)
.cfi_def_cfa_offset 72
pushq 104(%rsp)
.cfi_def_cfa_offset 80
pushq 104(%rsp)
.cfi_def_cfa_offset 88
pushq 104(%rsp)
.cfi_def_cfa_offset 96
pushq 104(%rsp)
.cfi_def_cfa_offset 104
pushq 104(%rsp)
.cfi_def_cfa_offset 112
call _Z86__device_stub__Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddiPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi
addq $104, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi, .-_Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2062:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2062:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /*******************************************************************************
* serveral useful gpu functions will be defined in this file to facilitate
* the surface redistance scheme
******************************************************************************/
typedef struct
{
double sR;
double sL;
} double_eno_derivative;
__device__ inline
double max2(double x, double y)
{
return (x<y) ? y : x;
}
__device__ inline
double min2(double x, double y)
{
return (x<y) ? x : y;
}
__device__ inline
double min_mod(double x, double y)
{
return (x*y<0) ? 0.0 : (fabs(x)<fabs(y) ? x : y);
}
__device__ inline
double sign(double x)
{
return (x>0) ? 1.0 : -1.0;
}
__device__ inline
bool same_sign(double x, double y)
{
return (x*y>0) || (x==0 && y==0);
}
__device__ inline
void advection_velocity(double & H1, double & H2, double & H3, double sign, double Dx, double Dy, double Dz, double nx, double ny, double nz)
{
double normal_d = nx * Dx + ny + Dy + nz * Dz;
H1 = sign * (Dx - nx * normal_d);
H2 = sign * (Dy - ny * normal_d);
H3 = sign * (Dz - nz * normal_d);
double H_mag = sqrt(H1*H1+H2*H2+H3*H3+1e-6);
H1 = H1/H_mag;
H2 = H2/H_mag;
H3 = H3/H_mag;
}
// convert subindex to linear index
// periodic boundary conditions are assumed
__device__ inline
int sub2ind(int row_idx, int col_idx, int pge_idx, int rows, int cols, int pges)
{
int row_idxn = min2(rows-1, max2(0, row_idx));
int col_idxn = min2(cols-1, max2(0, col_idx));
int pge_idxn = min2(pges-1, max2(0, pge_idx));
int ind = pge_idxn * rows * cols + col_idxn * rows + row_idxn;
return ind;
}
/******************************************************************************
* calculate Eno derivatives at node v0: [v4,v1,v0,v2,v3]
******************************************************************************/
__device__ inline
double_eno_derivative eno_derivative( double v4, double v1, double v0, double v2, double v3, double pr, double pl, double ds)
{
double p2m;
double_eno_derivative eno_d;
double p2 = v1 - 2.0 * v0 + v2;
double p2r = v0 - 2.0 * v2 + v3;
p2m = 0.5 * min_mod(p2, p2r) / pow(ds, 2);
double vr = (pr==ds) ? v2 : 0;
eno_d.sR = (vr - v0) / pr - pr * p2m;
double p2l = v0 - 2.0 * v1 + v4;
p2m = 0.5 * min_mod(p2, p2l) / pow(ds, 2);
double vl = (pl==ds) ? v1 : 0;
eno_d.sL = (v0 - vl) / pl + pl * p2m;
return eno_d;
}
// calculate surface redistance step
// now lsf represents the auxilary level set function(not the level set function)
// inputs : the auxilary level set function, sign of the initial level set function, distance to the interface, normal vectors
__global__
void surface_redistance_step(double * step, double const * lsf, double const * sign, double const * deltat, double const * nx, double const * ny, double const * nz, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
int right = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int right2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
int left = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
double_eno_derivative eno_dx = eno_derivative( lsf[left2], lsf[left], lsf[ind], lsf[right], lsf[right2], xpr[ind], xpl[ind], dx);
double Dx[3] = {eno_dx.sR, 0, eno_dx.sL};
int front = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int front2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
int back = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
double_eno_derivative eno_dy = eno_derivative( lsf[back2], lsf[back], lsf[ind], lsf[front], lsf[front2], ypf[ind], ypb[ind], dy);
double Dy[3] = {eno_dy.sR, 0, eno_dy.sL};
int up = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int up2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
int down = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
double_eno_derivative eno_dz = eno_derivative( lsf[down2], lsf[down], lsf[ind], lsf[up], lsf[up2], zpu[ind], zpd[ind], dz);
double Dz[3] = {eno_dz.sR, 0, eno_dz.sL};
//Forward=-1, None=0, BackWard=1
int const choice_x[26] = {-1,-1,-1,-1, 1, 1, 1, 1, 0, 0, 0, 0,-1,-1, 1, 1,-1,-1, 1, 1, -1, 1, 0, 0, 0, 0};
int const choice_y[26] = {-1,-1, 1, 1,-1,-1, 1, 1, -1,-1, 1, 1, 0, 0, 0, 0,-1, 1,-1, 1, 0, 0,-1, 1, 0, 0};
int const choice_z[26] = {-1, 1,-1, 1,-1, 1,-1, 1, -1, 1,-1, 1,-1, 1,-1, 1, 0, 0, 0, 0, 0, 0, 0, 0,-1, 1};
double Nx = nx[ind];
double Ny = ny[ind];
double Nz = nz[ind];
double Sign = sign[ind];
double dx_c = (Dx[0] + Dx[2]) / 2;
double dy_c = (Dy[0] + Dy[2]) / 2;
double dz_c = (Dz[0] + Dz[2]) / 2;
double maxH1 = 0;
double maxH2 = 0;
double maxH3 = 0;
for(int i=0;i<26;i++){
double dr_x = Dx[choice_x[i]+1];
double dr_y = Dy[choice_y[i]+1];
double dr_z = Dz[choice_z[i]+1];
double H1, H2, H3; // information propagation direction
advection_velocity(H1,H2,H3,Sign,dr_x,dr_y,dr_z,Nx,Ny,Nz);
maxH1 = (fabs(H1)>maxH1) ? fabs(H1) : maxH1;
maxH2 = (fabs(H2)>maxH2) ? fabs(H2) : maxH1;
maxH3 = (fabs(H3)>maxH3) ? fabs(H3) : maxH1;
}
double dt = deltat[ind];
//step[ind] = dt*Sign*(sqrt( pow(dx_c*Nz-Nx*dz_c,2)+pow(dy_c*Nx-Ny*dx_c,2)+pow(dz_c*Ny-Nz*dy_c,2) )-1) - 0.5*(Dx[0]-Dx[2]) - 0.5*(Dy[0]-Dy[2]) - 0.5*(Dz[0]-Dz[2]);
step[ind] = dt*Sign*(sqrt( pow(dx_c*Nz-Nx*dz_c,2)+pow(dy_c*Nx-Ny*dx_c,2)+pow(dz_c*Ny-Nz*dy_c,2) )-1) - 0.5*dt*(maxH1*(Dx[0]-Dx[2]) - maxH2*(Dy[0]-Dy[2]) - maxH3*(Dz[0]-Dz[2]));
//step[ind] = deltat[ind]*Sign*(sqrt( pow(dx_c*Nz-Nx*dz_c,2)+pow(dy_c*Nx-Ny*dx_c,2)+pow(dz_c*Ny-Nz*dy_c,2) )-1);
//step[ind] = maxH3;
//step[ind] = Dz[0] - Dz[2];
// step[ind] = Dz[0];
} | #include <hip/hip_runtime.h>
/*******************************************************************************
* serveral useful gpu functions will be defined in this file to facilitate
* the surface redistance scheme
******************************************************************************/
typedef struct
{
double sR;
double sL;
} double_eno_derivative;
__device__ inline
double max2(double x, double y)
{
return (x<y) ? y : x;
}
__device__ inline
double min2(double x, double y)
{
return (x<y) ? x : y;
}
__device__ inline
double min_mod(double x, double y)
{
return (x*y<0) ? 0.0 : (fabs(x)<fabs(y) ? x : y);
}
__device__ inline
double sign(double x)
{
return (x>0) ? 1.0 : -1.0;
}
__device__ inline
bool same_sign(double x, double y)
{
return (x*y>0) || (x==0 && y==0);
}
__device__ inline
void advection_velocity(double & H1, double & H2, double & H3, double sign, double Dx, double Dy, double Dz, double nx, double ny, double nz)
{
double normal_d = nx * Dx + ny + Dy + nz * Dz;
H1 = sign * (Dx - nx * normal_d);
H2 = sign * (Dy - ny * normal_d);
H3 = sign * (Dz - nz * normal_d);
double H_mag = sqrt(H1*H1+H2*H2+H3*H3+1e-6);
H1 = H1/H_mag;
H2 = H2/H_mag;
H3 = H3/H_mag;
}
// convert subindex to linear index
// periodic boundary conditions are assumed
__device__ inline
int sub2ind(int row_idx, int col_idx, int pge_idx, int rows, int cols, int pges)
{
int row_idxn = min2(rows-1, max2(0, row_idx));
int col_idxn = min2(cols-1, max2(0, col_idx));
int pge_idxn = min2(pges-1, max2(0, pge_idx));
int ind = pge_idxn * rows * cols + col_idxn * rows + row_idxn;
return ind;
}
/******************************************************************************
* calculate Eno derivatives at node v0: [v4,v1,v0,v2,v3]
******************************************************************************/
__device__ inline
double_eno_derivative eno_derivative( double v4, double v1, double v0, double v2, double v3, double pr, double pl, double ds)
{
double p2m;
double_eno_derivative eno_d;
double p2 = v1 - 2.0 * v0 + v2;
double p2r = v0 - 2.0 * v2 + v3;
p2m = 0.5 * min_mod(p2, p2r) / pow(ds, 2);
double vr = (pr==ds) ? v2 : 0;
eno_d.sR = (vr - v0) / pr - pr * p2m;
double p2l = v0 - 2.0 * v1 + v4;
p2m = 0.5 * min_mod(p2, p2l) / pow(ds, 2);
double vl = (pl==ds) ? v1 : 0;
eno_d.sL = (v0 - vl) / pl + pl * p2m;
return eno_d;
}
// calculate surface redistance step
// now lsf represents the auxilary level set function(not the level set function)
// inputs : the auxilary level set function, sign of the initial level set function, distance to the interface, normal vectors
__global__
void surface_redistance_step(double * step, double const * lsf, double const * sign, double const * deltat, double const * nx, double const * ny, double const * nz, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
int right = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int right2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
int left = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
double_eno_derivative eno_dx = eno_derivative( lsf[left2], lsf[left], lsf[ind], lsf[right], lsf[right2], xpr[ind], xpl[ind], dx);
double Dx[3] = {eno_dx.sR, 0, eno_dx.sL};
int front = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int front2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
int back = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
double_eno_derivative eno_dy = eno_derivative( lsf[back2], lsf[back], lsf[ind], lsf[front], lsf[front2], ypf[ind], ypb[ind], dy);
double Dy[3] = {eno_dy.sR, 0, eno_dy.sL};
int up = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int up2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
int down = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
double_eno_derivative eno_dz = eno_derivative( lsf[down2], lsf[down], lsf[ind], lsf[up], lsf[up2], zpu[ind], zpd[ind], dz);
double Dz[3] = {eno_dz.sR, 0, eno_dz.sL};
//Forward=-1, None=0, BackWard=1
int const choice_x[26] = {-1,-1,-1,-1, 1, 1, 1, 1, 0, 0, 0, 0,-1,-1, 1, 1,-1,-1, 1, 1, -1, 1, 0, 0, 0, 0};
int const choice_y[26] = {-1,-1, 1, 1,-1,-1, 1, 1, -1,-1, 1, 1, 0, 0, 0, 0,-1, 1,-1, 1, 0, 0,-1, 1, 0, 0};
int const choice_z[26] = {-1, 1,-1, 1,-1, 1,-1, 1, -1, 1,-1, 1,-1, 1,-1, 1, 0, 0, 0, 0, 0, 0, 0, 0,-1, 1};
double Nx = nx[ind];
double Ny = ny[ind];
double Nz = nz[ind];
double Sign = sign[ind];
double dx_c = (Dx[0] + Dx[2]) / 2;
double dy_c = (Dy[0] + Dy[2]) / 2;
double dz_c = (Dz[0] + Dz[2]) / 2;
double maxH1 = 0;
double maxH2 = 0;
double maxH3 = 0;
for(int i=0;i<26;i++){
double dr_x = Dx[choice_x[i]+1];
double dr_y = Dy[choice_y[i]+1];
double dr_z = Dz[choice_z[i]+1];
double H1, H2, H3; // information propagation direction
advection_velocity(H1,H2,H3,Sign,dr_x,dr_y,dr_z,Nx,Ny,Nz);
maxH1 = (fabs(H1)>maxH1) ? fabs(H1) : maxH1;
maxH2 = (fabs(H2)>maxH2) ? fabs(H2) : maxH1;
maxH3 = (fabs(H3)>maxH3) ? fabs(H3) : maxH1;
}
double dt = deltat[ind];
//step[ind] = dt*Sign*(sqrt( pow(dx_c*Nz-Nx*dz_c,2)+pow(dy_c*Nx-Ny*dx_c,2)+pow(dz_c*Ny-Nz*dy_c,2) )-1) - 0.5*(Dx[0]-Dx[2]) - 0.5*(Dy[0]-Dy[2]) - 0.5*(Dz[0]-Dz[2]);
step[ind] = dt*Sign*(sqrt( pow(dx_c*Nz-Nx*dz_c,2)+pow(dy_c*Nx-Ny*dx_c,2)+pow(dz_c*Ny-Nz*dy_c,2) )-1) - 0.5*dt*(maxH1*(Dx[0]-Dx[2]) - maxH2*(Dy[0]-Dy[2]) - maxH3*(Dz[0]-Dz[2]));
//step[ind] = deltat[ind]*Sign*(sqrt( pow(dx_c*Nz-Nx*dz_c,2)+pow(dy_c*Nx-Ny*dx_c,2)+pow(dz_c*Ny-Nz*dy_c,2) )-1);
//step[ind] = maxH3;
//step[ind] = Dz[0] - Dz[2];
// step[ind] = Dz[0];
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
/*******************************************************************************
* serveral useful gpu functions will be defined in this file to facilitate
* the surface redistance scheme
******************************************************************************/
typedef struct
{
double sR;
double sL;
} double_eno_derivative;
__device__ inline
double max2(double x, double y)
{
return (x<y) ? y : x;
}
__device__ inline
double min2(double x, double y)
{
return (x<y) ? x : y;
}
__device__ inline
double min_mod(double x, double y)
{
return (x*y<0) ? 0.0 : (fabs(x)<fabs(y) ? x : y);
}
__device__ inline
double sign(double x)
{
return (x>0) ? 1.0 : -1.0;
}
__device__ inline
bool same_sign(double x, double y)
{
return (x*y>0) || (x==0 && y==0);
}
__device__ inline
void advection_velocity(double & H1, double & H2, double & H3, double sign, double Dx, double Dy, double Dz, double nx, double ny, double nz)
{
double normal_d = nx * Dx + ny + Dy + nz * Dz;
H1 = sign * (Dx - nx * normal_d);
H2 = sign * (Dy - ny * normal_d);
H3 = sign * (Dz - nz * normal_d);
double H_mag = sqrt(H1*H1+H2*H2+H3*H3+1e-6);
H1 = H1/H_mag;
H2 = H2/H_mag;
H3 = H3/H_mag;
}
// convert subindex to linear index
// periodic boundary conditions are assumed
__device__ inline
int sub2ind(int row_idx, int col_idx, int pge_idx, int rows, int cols, int pges)
{
int row_idxn = min2(rows-1, max2(0, row_idx));
int col_idxn = min2(cols-1, max2(0, col_idx));
int pge_idxn = min2(pges-1, max2(0, pge_idx));
int ind = pge_idxn * rows * cols + col_idxn * rows + row_idxn;
return ind;
}
/******************************************************************************
* calculate Eno derivatives at node v0: [v4,v1,v0,v2,v3]
******************************************************************************/
__device__ inline
double_eno_derivative eno_derivative( double v4, double v1, double v0, double v2, double v3, double pr, double pl, double ds)
{
double p2m;
double_eno_derivative eno_d;
double p2 = v1 - 2.0 * v0 + v2;
double p2r = v0 - 2.0 * v2 + v3;
p2m = 0.5 * min_mod(p2, p2r) / pow(ds, 2);
double vr = (pr==ds) ? v2 : 0;
eno_d.sR = (vr - v0) / pr - pr * p2m;
double p2l = v0 - 2.0 * v1 + v4;
p2m = 0.5 * min_mod(p2, p2l) / pow(ds, 2);
double vl = (pl==ds) ? v1 : 0;
eno_d.sL = (v0 - vl) / pl + pl * p2m;
return eno_d;
}
// calculate surface redistance step
// now lsf represents the auxilary level set function(not the level set function)
// inputs : the auxilary level set function, sign of the initial level set function, distance to the interface, normal vectors
__global__
void surface_redistance_step(double * step, double const * lsf, double const * sign, double const * deltat, double const * nx, double const * ny, double const * nz, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
int right = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int right2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
int left = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
double_eno_derivative eno_dx = eno_derivative( lsf[left2], lsf[left], lsf[ind], lsf[right], lsf[right2], xpr[ind], xpl[ind], dx);
double Dx[3] = {eno_dx.sR, 0, eno_dx.sL};
int front = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int front2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
int back = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
double_eno_derivative eno_dy = eno_derivative( lsf[back2], lsf[back], lsf[ind], lsf[front], lsf[front2], ypf[ind], ypb[ind], dy);
double Dy[3] = {eno_dy.sR, 0, eno_dy.sL};
int up = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int up2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
int down = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
double_eno_derivative eno_dz = eno_derivative( lsf[down2], lsf[down], lsf[ind], lsf[up], lsf[up2], zpu[ind], zpd[ind], dz);
double Dz[3] = {eno_dz.sR, 0, eno_dz.sL};
//Forward=-1, None=0, BackWard=1
int const choice_x[26] = {-1,-1,-1,-1, 1, 1, 1, 1, 0, 0, 0, 0,-1,-1, 1, 1,-1,-1, 1, 1, -1, 1, 0, 0, 0, 0};
int const choice_y[26] = {-1,-1, 1, 1,-1,-1, 1, 1, -1,-1, 1, 1, 0, 0, 0, 0,-1, 1,-1, 1, 0, 0,-1, 1, 0, 0};
int const choice_z[26] = {-1, 1,-1, 1,-1, 1,-1, 1, -1, 1,-1, 1,-1, 1,-1, 1, 0, 0, 0, 0, 0, 0, 0, 0,-1, 1};
double Nx = nx[ind];
double Ny = ny[ind];
double Nz = nz[ind];
double Sign = sign[ind];
double dx_c = (Dx[0] + Dx[2]) / 2;
double dy_c = (Dy[0] + Dy[2]) / 2;
double dz_c = (Dz[0] + Dz[2]) / 2;
double maxH1 = 0;
double maxH2 = 0;
double maxH3 = 0;
for(int i=0;i<26;i++){
double dr_x = Dx[choice_x[i]+1];
double dr_y = Dy[choice_y[i]+1];
double dr_z = Dz[choice_z[i]+1];
double H1, H2, H3; // information propagation direction
advection_velocity(H1,H2,H3,Sign,dr_x,dr_y,dr_z,Nx,Ny,Nz);
maxH1 = (fabs(H1)>maxH1) ? fabs(H1) : maxH1;
maxH2 = (fabs(H2)>maxH2) ? fabs(H2) : maxH1;
maxH3 = (fabs(H3)>maxH3) ? fabs(H3) : maxH1;
}
double dt = deltat[ind];
//step[ind] = dt*Sign*(sqrt( pow(dx_c*Nz-Nx*dz_c,2)+pow(dy_c*Nx-Ny*dx_c,2)+pow(dz_c*Ny-Nz*dy_c,2) )-1) - 0.5*(Dx[0]-Dx[2]) - 0.5*(Dy[0]-Dy[2]) - 0.5*(Dz[0]-Dz[2]);
step[ind] = dt*Sign*(sqrt( pow(dx_c*Nz-Nx*dz_c,2)+pow(dy_c*Nx-Ny*dx_c,2)+pow(dz_c*Ny-Nz*dy_c,2) )-1) - 0.5*dt*(maxH1*(Dx[0]-Dx[2]) - maxH2*(Dy[0]-Dy[2]) - maxH3*(Dz[0]-Dz[2]));
//step[ind] = deltat[ind]*Sign*(sqrt( pow(dx_c*Nz-Nx*dz_c,2)+pow(dy_c*Nx-Ny*dx_c,2)+pow(dz_c*Ny-Nz*dy_c,2) )-1);
//step[ind] = maxH3;
//step[ind] = Dz[0] - Dz[2];
// step[ind] = Dz[0];
} | .text
.file "enork2_surface_redistance.hip"
.globl _Z38__device_stub__surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi # -- Begin function _Z38__device_stub__surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi
.p2align 4, 0x90
.type _Z38__device_stub__surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi,@function
_Z38__device_stub__surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi: # @_Z38__device_stub__surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi
.cfi_startproc
# %bb.0:
subq $296, %rsp # imm = 0x128
.cfi_def_cfa_offset 304
movq %rdi, 120(%rsp)
movq %rsi, 112(%rsp)
movq %rdx, 104(%rsp)
movq %rcx, 96(%rsp)
movq %r8, 88(%rsp)
movq %r9, 80(%rsp)
movsd %xmm0, 72(%rsp)
movsd %xmm1, 64(%rsp)
movsd %xmm2, 56(%rsp)
leaq 120(%rsp), %rax
movq %rax, 128(%rsp)
leaq 112(%rsp), %rax
movq %rax, 136(%rsp)
leaq 104(%rsp), %rax
movq %rax, 144(%rsp)
leaq 96(%rsp), %rax
movq %rax, 152(%rsp)
leaq 88(%rsp), %rax
movq %rax, 160(%rsp)
leaq 80(%rsp), %rax
movq %rax, 168(%rsp)
leaq 304(%rsp), %rax
movq %rax, 176(%rsp)
leaq 312(%rsp), %rax
movq %rax, 184(%rsp)
leaq 320(%rsp), %rax
movq %rax, 192(%rsp)
leaq 328(%rsp), %rax
movq %rax, 200(%rsp)
leaq 336(%rsp), %rax
movq %rax, 208(%rsp)
leaq 344(%rsp), %rax
movq %rax, 216(%rsp)
leaq 352(%rsp), %rax
movq %rax, 224(%rsp)
leaq 360(%rsp), %rax
movq %rax, 232(%rsp)
leaq 368(%rsp), %rax
movq %rax, 240(%rsp)
leaq 376(%rsp), %rax
movq %rax, 248(%rsp)
leaq 72(%rsp), %rax
movq %rax, 256(%rsp)
leaq 64(%rsp), %rax
movq %rax, 264(%rsp)
leaq 56(%rsp), %rax
movq %rax, 272(%rsp)
leaq 384(%rsp), %rax
movq %rax, 280(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $312, %rsp # imm = 0x138
.cfi_adjust_cfa_offset -312
retq
.Lfunc_end0:
.size _Z38__device_stub__surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi, .Lfunc_end0-_Z38__device_stub__surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi,@object # @_Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi
.section .rodata,"a",@progbits
.globl _Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi
.p2align 3, 0x0
_Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi:
.quad _Z38__device_stub__surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi
.size _Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi"
.size .L__unnamed_1, 73
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z38__device_stub__surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0008a03d_00000000-6_enork2_surface_redistance.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2037:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2037:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z86__device_stub__Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddiPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi
.type _Z86__device_stub__Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddiPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi, @function
_Z86__device_stub__Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddiPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi:
.LFB2059:
.cfi_startproc
endbr64
subq $376, %rsp
.cfi_def_cfa_offset 384
movq %rdi, 120(%rsp)
movq %rsi, 112(%rsp)
movq %rdx, 104(%rsp)
movq %rcx, 96(%rsp)
movq %r8, 88(%rsp)
movq %r9, 80(%rsp)
movsd %xmm0, 16(%rsp)
movsd %xmm1, 8(%rsp)
movsd %xmm2, (%rsp)
movq 384(%rsp), %rax
movq %rax, 72(%rsp)
movq 392(%rsp), %rax
movq %rax, 64(%rsp)
movq 400(%rsp), %rax
movq %rax, 56(%rsp)
movq 408(%rsp), %rax
movq %rax, 48(%rsp)
movq 416(%rsp), %rax
movq %rax, 40(%rsp)
movq 424(%rsp), %rax
movq %rax, 32(%rsp)
movq 432(%rsp), %rax
movq %rax, 24(%rsp)
movq %fs:40, %rax
movq %rax, 360(%rsp)
xorl %eax, %eax
leaq 120(%rsp), %rax
movq %rax, 192(%rsp)
leaq 112(%rsp), %rax
movq %rax, 200(%rsp)
leaq 104(%rsp), %rax
movq %rax, 208(%rsp)
leaq 96(%rsp), %rax
movq %rax, 216(%rsp)
leaq 88(%rsp), %rax
movq %rax, 224(%rsp)
leaq 80(%rsp), %rax
movq %rax, 232(%rsp)
leaq 72(%rsp), %rax
movq %rax, 240(%rsp)
leaq 64(%rsp), %rax
movq %rax, 248(%rsp)
leaq 56(%rsp), %rax
movq %rax, 256(%rsp)
leaq 48(%rsp), %rax
movq %rax, 264(%rsp)
leaq 40(%rsp), %rax
movq %rax, 272(%rsp)
leaq 32(%rsp), %rax
movq %rax, 280(%rsp)
leaq 24(%rsp), %rax
movq %rax, 288(%rsp)
leaq 440(%rsp), %rax
movq %rax, 296(%rsp)
leaq 448(%rsp), %rax
movq %rax, 304(%rsp)
leaq 456(%rsp), %rax
movq %rax, 312(%rsp)
leaq 16(%rsp), %rax
movq %rax, 320(%rsp)
leaq 8(%rsp), %rax
movq %rax, 328(%rsp)
movq %rsp, %rax
movq %rax, 336(%rsp)
leaq 464(%rsp), %rax
movq %rax, 344(%rsp)
movl $1, 144(%rsp)
movl $1, 148(%rsp)
movl $1, 152(%rsp)
movl $1, 156(%rsp)
movl $1, 160(%rsp)
movl $1, 164(%rsp)
leaq 136(%rsp), %rcx
leaq 128(%rsp), %rdx
leaq 156(%rsp), %rsi
leaq 144(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 360(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $376, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 136(%rsp)
.cfi_def_cfa_offset 392
pushq 136(%rsp)
.cfi_def_cfa_offset 400
leaq 208(%rsp), %r9
movq 172(%rsp), %rcx
movl 180(%rsp), %r8d
movq 160(%rsp), %rsi
movl 168(%rsp), %edx
leaq _Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 384
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2059:
.size _Z86__device_stub__Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddiPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi, .-_Z86__device_stub__Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddiPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi
.globl _Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi
.type _Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi, @function
_Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi:
.LFB2060:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 104(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
movl 104(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
movl 104(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 48
movl 104(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 56
pushq 104(%rsp)
.cfi_def_cfa_offset 64
pushq 104(%rsp)
.cfi_def_cfa_offset 72
pushq 104(%rsp)
.cfi_def_cfa_offset 80
pushq 104(%rsp)
.cfi_def_cfa_offset 88
pushq 104(%rsp)
.cfi_def_cfa_offset 96
pushq 104(%rsp)
.cfi_def_cfa_offset 104
pushq 104(%rsp)
.cfi_def_cfa_offset 112
call _Z86__device_stub__Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddiPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi
addq $104, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi, .-_Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2062:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2062:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "enork2_surface_redistance.hip"
.globl _Z38__device_stub__surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi # -- Begin function _Z38__device_stub__surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi
.p2align 4, 0x90
.type _Z38__device_stub__surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi,@function
_Z38__device_stub__surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi: # @_Z38__device_stub__surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi
.cfi_startproc
# %bb.0:
subq $296, %rsp # imm = 0x128
.cfi_def_cfa_offset 304
movq %rdi, 120(%rsp)
movq %rsi, 112(%rsp)
movq %rdx, 104(%rsp)
movq %rcx, 96(%rsp)
movq %r8, 88(%rsp)
movq %r9, 80(%rsp)
movsd %xmm0, 72(%rsp)
movsd %xmm1, 64(%rsp)
movsd %xmm2, 56(%rsp)
leaq 120(%rsp), %rax
movq %rax, 128(%rsp)
leaq 112(%rsp), %rax
movq %rax, 136(%rsp)
leaq 104(%rsp), %rax
movq %rax, 144(%rsp)
leaq 96(%rsp), %rax
movq %rax, 152(%rsp)
leaq 88(%rsp), %rax
movq %rax, 160(%rsp)
leaq 80(%rsp), %rax
movq %rax, 168(%rsp)
leaq 304(%rsp), %rax
movq %rax, 176(%rsp)
leaq 312(%rsp), %rax
movq %rax, 184(%rsp)
leaq 320(%rsp), %rax
movq %rax, 192(%rsp)
leaq 328(%rsp), %rax
movq %rax, 200(%rsp)
leaq 336(%rsp), %rax
movq %rax, 208(%rsp)
leaq 344(%rsp), %rax
movq %rax, 216(%rsp)
leaq 352(%rsp), %rax
movq %rax, 224(%rsp)
leaq 360(%rsp), %rax
movq %rax, 232(%rsp)
leaq 368(%rsp), %rax
movq %rax, 240(%rsp)
leaq 376(%rsp), %rax
movq %rax, 248(%rsp)
leaq 72(%rsp), %rax
movq %rax, 256(%rsp)
leaq 64(%rsp), %rax
movq %rax, 264(%rsp)
leaq 56(%rsp), %rax
movq %rax, 272(%rsp)
leaq 384(%rsp), %rax
movq %rax, 280(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $312, %rsp # imm = 0x138
.cfi_adjust_cfa_offset -312
retq
.Lfunc_end0:
.size _Z38__device_stub__surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi, .Lfunc_end0-_Z38__device_stub__surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi,@object # @_Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi
.section .rodata,"a",@progbits
.globl _Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi
.p2align 3, 0x0
_Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi:
.quad _Z38__device_stub__surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi
.size _Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi"
.size .L__unnamed_1, 73
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z38__device_stub__surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z23surface_redistance_stepPdPKdS1_S1_S1_S1_S1_S1_S1_S1_S1_S1_S1_iiidddi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include<stdio.h>
#define Y1(i,j) Y1[((i)*(A))+(j)]
#define Yf(i,j) Yf[((i)*(B1))+(j)]
#define Y2(i,j) Y2[((i)*(C))+(j)]
#define Z1(i,j) Z1[((i)*(C))+(j)]
#define X1(i,j) X1[((i)*(B))+(j)]
#define X2(i,j) X2[((i)*(C))+(j)]
#define Y(i,j) Y[((i)*(B))+(j)]
#define Z(i,j) Z[((i)*(B))+(j)]
//#define I(i,j) I[((i)*(A))+(j)]
#define foo(a,b) b?tanh(a):exp(a)
#define FOOTPRINT_SIZE 64
#define BLOCK_SIZE 32
#define THREADS_PER_BLOCK 32 //for Pointwise calculations
void *myCudaMalloc1(size_t len)
{
    /* Allocate `len` bytes of device memory and return the raw device pointer.
       FIX: NULL-initialize `p` so that a failed cudaMalloc yields a
       well-defined NULL return instead of an indeterminate pointer. */
    void *p = NULL;
    cudaMalloc(&p, len);
    return p;
}
void displayMatrix2 (const char *label, double *m, int rows, int cols)
{
    /* Pretty-print the host matrix `m` (rows x cols, row-major):
       a blank line, the label, then one tab-separated line per row. */
    printf ("\n%s:\n", label);
    for (int r = 0; r < rows; ++r)
    {
        const double *row = m + (long)r * cols;
        for (int c = 0; c < cols; ++c)
            printf("%10.5lf\t", row[c]);
        printf ("\n");
    }
}
__global__ void MatMulKernel(double* C, double* A, double* B, int A_width, int A_height, int B_width, int B_height, bool transA, bool transB);
//__global__ void MatMulKernel01(double* C, double* A, double* B, int A_width, int A_height, int B_width, int B_height, bool transA, bool transB);
__global__ void cuMinus(double *C, double *A, double *B, int n, double delta=1);
__global__ void cuGradientFunc(double *A, double *B, long n, long n_cols);
__global__ void cuFunc(double *A, double *B, long n, long n_cols, long val);
__global__ void cuDivideByVec(double *C, double *A, double *B, long n, long n_cols);
__global__ void cu_sum(const double* src, double* sum, double *global_mem, const int n);
//---------------------------Helper Host Functions------------------------------------------------------------------------------------------------
void initializeW(double* X1, long A, long B){
    /* Fill the A x B row-major weight matrix with uniform random
       values in [-0.1, 0.1]. */
    for (long r = 0; r < A; ++r) {
        double *row = X1 + r * B;
        for (long c = 0; c < B; ++c)
            row[c] = ((double)rand() / (double)RAND_MAX) * 0.2 - 0.1;
    }
}
void initializeI(double* X1, long A, long B){
    /* Fill the A x B input matrix with alternating columns:
       element (i,j) = j % 2, i.e. 0,1,0,1,... along each row. */
    for (long i = 0; i < A; ++i)
        for (long j = 0; j < B; ++j)
            X1[i * B + j] = (double)(j % 2);
}
void initializeO(double* X1, long A, long B){
    /* Fill the A x B output matrix with alternating rows:
       element (i,j) = i % 2, i.e. row of zeros, row of ones, ... */
    for (long i = 0; i < A; ++i) {
        double value = (double)(i % 2);
        for (long j = 0; j < B; ++j)
            X1[i * B + j] = value;
    }
}
void mm(double* X2, double* Y, double* Z1, long A, long B, long C){
    /* Device matrix product X2 (A x C) = Y (A x B) * Z1 (B x C),
       no transposes.  One BLOCK_SIZE x BLOCK_SIZE thread block per
       output tile; grid dimensions are ceil-divided output extents. */
    const bool transA = false, transB = false;
    const int A_width  = B, A_height = A;
    const int B_width  = C, B_height = B;
    int grid_size_x = ((transB ? B_height : B_width)  - 1) / BLOCK_SIZE + 1;
    int grid_size_y = ((transA ? A_width  : A_height) - 1) / BLOCK_SIZE + 1;
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(grid_size_x, grid_size_y);
    MatMulKernel<<<dimGrid,dimBlock>>>(X2,Y,Z1,A_width,A_height,B_width,B_height, transA,transB);
}
void mmt(double* X1, double* Y2, double* Z1, long A, long B, long C){
    /* Device matrix product with transposed right operand:
       X1 (A x B) = Z1 (A x C) * Y2(B x C)^T. */
    const bool transA = false, transB = true;
    const int A_width  = C, A_height = A;
    const int B_width  = C, B_height = B;
    int grid_size_x = ((transB ? B_height : B_width)  - 1) / BLOCK_SIZE + 1;
    int grid_size_y = ((transA ? A_width  : A_height) - 1) / BLOCK_SIZE + 1;
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(grid_size_x, grid_size_y);
    MatMulKernel<<<dimGrid,dimBlock>>>(X1,Z1,Y2,A_width,A_height,B_width,B_height, transA,transB);
}
void mtm(double* X2, double* Y1, double* Z1, long A, long B, long C){
    /* Device matrix product with transposed left operand:
       X2 (A x C) = Y1(B x A)^T * Z1 (B x C). */
    const bool transA = true, transB = false;
    const int A_width  = A, A_height = B;
    const int B_width  = C, B_height = B;
    int grid_size_x = ((transB ? B_height : B_width)  - 1) / BLOCK_SIZE + 1;
    int grid_size_y = ((transA ? A_width  : A_height) - 1) / BLOCK_SIZE + 1;
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(grid_size_x, grid_size_y);
    MatMulKernel<<<dimGrid,dimBlock>>>(X2,Y1,Z1,A_width,A_height,B_width,B_height, transA,transB);
}
void func(double* X1, double* Yf, long A, long B1, long val){
    /* Element-wise activation of Yf (A x B1) into X1, whose rows are
       shifted right by `val` columns (X1 has B1+val columns).
       foo(x, val) is tanh(x) when val != 0, exp(x) when val == 0. */
    long len = A * B1;
    const size_t block_size = THREADS_PER_BLOCK;
    const size_t num_blocks = (len + block_size - 1) / block_size;  /* ceil */
    cuFunc<<<num_blocks, block_size>>>(X1, Yf, len, B1, val);
}
void gradient_func(double* X1, double* Yf, long A, long B){
    /* In-place tanh-gradient over X1 (A x B):
       X1(i,j) = (1 - tanh(X1(i,j))^2) * Yf(i, j+1), Yf having B+1 columns. */
    long len = A * B;
    const size_t block_size = THREADS_PER_BLOCK;
    const size_t num_blocks = (len + block_size - 1) / block_size;  /* ceil */
    cuGradientFunc<<<num_blocks, block_size>>>(X1, Yf, len, B);
}
void error(double* X1, double* Y, double* Z, long A, long B){
    /* Element-wise error X1 = Y - Z over an A x B device matrix
       (cuMinus with its default scale of 1). */
    long len = A * B;
    const size_t block_size = THREADS_PER_BLOCK;
    const size_t num_blocks = (len + block_size - 1) / block_size;  /* ceil */
    cuMinus<<<num_blocks, block_size>>>(X1, Y, Z, len);
}
void reduction(double* Y, double* X1, long A, long B){
    /* Sums each of the A rows of the device matrix Y (A x B, row-major)
       and writes the A row totals to the device vector X1.
       Strategy: per row, run cu_sum repeatedly -- each pass produces one
       partial sum per block -- feeding the partials back in until a
       single block remains; the scalar result is collected in HostX and
       the whole vector is copied to the device in one final transfer. */
    /*Performs the summation of probabilities*/
    /*long i,j;
    for (i=0; i<A; i++)
    {
        X1[i]=0;
        for (j=0; j<B; j++)
            X1[i] += Y(i,j);
    }*/
    int len = B;
    const size_t block_size = THREADS_PER_BLOCK;
    // ceil(len / block_size): blocks needed for the first pass
    const size_t num_blocks = (len / block_size) + ((len % block_size) ? 1 : 0);
    double * HostX = (double * ) malloc(A * sizeof(double));
    double *data;            // device: working array currently being reduced
    double *d_partial_sums;  // device: one partial sum per launched block
    double *global_mem;      // device: scratch buffer handed to cu_sum
    data = (double * ) myCudaMalloc1(sizeof(double) * len);
    global_mem = (double * ) myCudaMalloc1( sizeof(double)* block_size);
    d_partial_sums = (double * ) myCudaMalloc1( sizeof(double)* num_blocks);
    for(int i = 0; i < A; ++i){
        int tmp_block_size = block_size;
        int tmp_num_blocks = num_blocks;
        int data_len = len;
        // seed the working array with row i of Y (device-to-device copy)
        cudaMemcpy(data, Y + i * len, data_len * sizeof(double), cudaMemcpyDeviceToDevice);
        while(true){
            cudaMemset(global_mem, 0, sizeof(double) * tmp_block_size);
            // NOTE(review): cu_sum indexes global_mem by threadIdx.x, so with
            // tmp_num_blocks > 1 every block shares the same scratch slots --
            // this looks like a cross-block data race; verify against cu_sum.
            cu_sum<<<tmp_num_blocks, tmp_block_size>>>(data, d_partial_sums, global_mem, data_len);
            cudaDeviceSynchronize();
            data_len = tmp_num_blocks;
            if(tmp_num_blocks == 1){
                // copy the result back to the host: the single block's
                // partial sum is the row total
                double host_res = 0;
                cudaMemcpy(&host_res, d_partial_sums, sizeof(double), cudaMemcpyDeviceToHost);
                HostX[i] = host_res;
                break;
            }else if(tmp_num_blocks <= block_size){
                // remaining partials fit in one block: schedule the final pass
                // NOTE(review): tmp_block_size = data_len need not be a power
                // of two here, while cu_sum's halving loop appears to assume
                // it is -- confirm.
                tmp_block_size = data_len;
                tmp_num_blocks = 1;
                cudaMemcpy(data, d_partial_sums, data_len * sizeof(double), cudaMemcpyDeviceToDevice);
            }else{
                // still too many partials: another full-width pass
                tmp_block_size = THREADS_PER_BLOCK;
                tmp_num_blocks = (data_len / tmp_block_size) + ((data_len % tmp_block_size) ? 1 : 0);
                cudaMemcpy(data, d_partial_sums, data_len * sizeof(double), cudaMemcpyDeviceToDevice);
            }
        }
    }
    cudaMemcpy(X1, HostX, A * sizeof(double), cudaMemcpyHostToDevice); //copy back to the device
    cudaFree(global_mem);
    cudaFree(data);
    cudaFree(d_partial_sums);
    free(HostX);
    //displayMatrix2("HostX", HostX, A, 1);
}
void prob(double* Y,double* Z, double* X1, long A, long B){
    /* Normalized exponential: divide each row of Y (A x B) by the
       per-row total stored in X1, writing Z(i,j) = Y(i,j) / X1[i]. */
    long len = A * B;
    const size_t block_size = THREADS_PER_BLOCK;
    const size_t num_blocks = (len + block_size - 1) / block_size;  /* ceil */
    cuDivideByVec<<<num_blocks, block_size>>>(Z, Y, X1, len, B);
}
void delta(double* Z, double* Y, long A, long B, double C){
    /* Weight update: Z -= C * Y element-wise over A x B device values
       (implemented as cuMinus with Z as both output and left operand). */
    long len = A * B;
    const size_t block_size = THREADS_PER_BLOCK;
    const size_t num_blocks = (len + block_size - 1) / block_size;  /* ceil */
    cuMinus<<<num_blocks, block_size>>>(Z, Z, Y, len, C);
}
//----------------Device kernels---------------------------------
__global__ void MatMulKernel(double* C, double* A, double* B, int A_width, int A_height, int B_width, int B_height, bool transA, bool transB)
{
    /* Tiled GEMM: C = op(A) * op(B), where op(X) = X^T when the matching
       trans flag is set.  Launch with blockDim = (BLOCK_SIZE, BLOCK_SIZE);
       each thread block computes one BLOCK_SIZE x BLOCK_SIZE tile of C.
       Out-of-range tile elements are loaded as 0, so any matrix size works. */
    int thread_row = threadIdx.y;
    int thread_col = threadIdx.x;
    int block_row = blockIdx.y;
    int block_col = blockIdx.x;
    int Row = block_row * BLOCK_SIZE + thread_row,
        Col = block_col * BLOCK_SIZE + thread_col;
    int C_width = transB ? B_height : B_width;
    int C_height = transA ? A_width : A_height;
    /* FIX: accumulate in double -- the matrices are double, and the previous
       float accumulator and float tiles silently truncated every product. */
    double Cvalue = 0;
    /* FIX: parenthesize the shared-dimension extent before the ceil-division.
       The old expression applied the "-1" only to A_width
       (transA ? A_height : A_width - 1), costing one extra (harmless but
       wasted) tile iteration whenever A_height was a multiple of BLOCK_SIZE
       in the transposed-A path. */
    int inner = transA ? A_height : A_width;
    for (int m = 0; m < (inner - 1) / BLOCK_SIZE + 1; ++m) {
        /* +1 column of padding avoids shared-memory bank conflicts on the
           column-wise reads of shared_B in the inner product below. */
        __shared__ double shared_A[BLOCK_SIZE][BLOCK_SIZE + 1];
        __shared__ double shared_B[BLOCK_SIZE][BLOCK_SIZE + 1];
        if(transA){
            if(BLOCK_SIZE * m + thread_col < A_height && Row < A_width) {
                shared_A[thread_row][thread_col] = A[(BLOCK_SIZE * m + thread_col) * A_width + Row];
            }else{
                shared_A[thread_row][thread_col] = 0;
            }
        }else{
            if(Row < A_height && BLOCK_SIZE * m + thread_col < A_width) {
                shared_A[thread_row][thread_col] = A[Row * A_width + BLOCK_SIZE * m + thread_col];
            }else{
                shared_A[thread_row][thread_col] = 0;
            }
        }
        if(transB){
            if( Col < B_height && BLOCK_SIZE * m + thread_row < B_width) {
                shared_B[thread_row][thread_col] = B[ Col * B_width + BLOCK_SIZE * m + thread_row];
            } else {
                shared_B[thread_row][thread_col] = 0;
            }
        }else{
            if(BLOCK_SIZE * m + thread_row < B_height && Col < B_width ) {
                shared_B[thread_row][thread_col] = B[ (BLOCK_SIZE * m + thread_row) * B_width + Col];
            } else {
                shared_B[thread_row][thread_col] = 0;
            }
        }
        // Synchronize to ensure all tile elements are loaded before use
        __syncthreads();
        #pragma unroll
        for(int e=0; e<BLOCK_SIZE; ++e)
            Cvalue += shared_A[thread_row][e] * shared_B[e][thread_col];
        // All reads done before the next iteration overwrites the tiles
        __syncthreads();
    }
    if(Row < C_height && Col < C_width) {
        C[Row * C_width + Col] = Cvalue;
    }
}
__global__ void cuMinus(double *C, double *A, double *B, int n, double delta){
    /* Element-wise C = A - delta*B over n values, grid-stride loop.
       delta == 1 takes the multiply-free path (matching the original). */
    int step = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += step) {
        if (delta != 1)
            C[i] = A[i] - B[i] * delta;
        else
            C[i] = A[i] - B[i];
    }
}
__global__ void cuDivideByVec(double *C, double *A, double *B, long n, long n_cols){
    /* Row-wise divide: C[i] = A[i] / B[row], where row = i / n_cols
       maps the flat index back to its matrix row.  Grid-stride loop. */
    int step = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += step)
        C[i] = A[i] / B[i / n_cols];
}
__global__ void cuGradientFunc(double *A, double *B, long n, long n_cols){
    /* Backward pass through a tanh layer, in place over A (n = rows*n_cols).
       B has n_cols+1 columns whose column 0 is skipped (presumably a bias
       column -- confirm against gradient_func's host-side comment): for
       flat index tid = i*n_cols + j, the matching B element sits at
       i*(n_cols+1) + (j+1) = tid + tid/n_cols + 1, which is exactly the
       index expression below. */
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;  // grid-stride loop
    while(tid < n){
        // (1 - tanh(x)^2) is d/dx tanh(x)
        A[tid] = (1 - pow (tanh (A[tid]), 2)) * B[tid+1 + (tid)/n_cols];
        tid += stride;
    }
}
__global__ void cuFunc(double *A, double *B, long n, long n_cols, long val){
    /* Point-wise activation with a column shift.
       B is rows x n_cols; A has n_cols+val columns and receives
       foo(B(i,j), val) at column j+val (foo: tanh when val != 0, exp
       when val == 0 -- see the foo macro).
       For source flat index tid = i*n_cols + j the destination index is
       i*(n_cols+val) + j + val = tid + val*(tid/n_cols + 1).
       FIX: the previous expression val*(val + tid/n_cols) agreed with the
       documented host formula X1(i, j+val) only for val in {0, 1};
       generalized to any non-negative shift. */
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;  // grid-stride loop
    while(tid < n){
        A[tid + val*(tid/n_cols + 1)] = foo(B[tid],val);
        tid += stride;
    }
}
__global__ void cu_sum(const double* src, double* sum, double *global_mem, const int n){
    /* Per-block sum: writes the sum of this block's slice of src to
       sum[blockIdx.x].  Requires blockDim.x <= THREADS_PER_BLOCK.
       FIX: the scratch buffer used to be the caller-supplied `global_mem`,
       which is shared by ALL blocks -- a cross-block data race whenever
       gridDim.x > 1.  Each block now reduces in its own __shared__ buffer;
       `global_mem` is retained only for interface compatibility.
       FIX: the old halving loop (offset = blockDim.x/2, >>=1) silently
       dropped elements when blockDim.x was not a power of two (the host
       launches the final pass with blockDim.x = remaining count); the
       ceil-half loop below is correct for any block size. */
    __shared__ double partial[THREADS_PER_BLOCK];
    (void)global_mem;  // unused since the shared-memory fix
    unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // load input into shared memory, padding out-of-range lanes with 0
    partial[threadIdx.x] = (tid < n) ? src[tid] : 0.0;
    __syncthreads();
    // contiguous-range tree reduction, ceil-half so odd sizes work
    for(int active = blockDim.x; active > 1; ){
        int half = (active + 1) / 2;
        if(threadIdx.x + half < active){
            partial[threadIdx.x] += partial[threadIdx.x + half];
        }
        active = half;
        // barrier kept outside the branch: all threads must reach it
        __syncthreads();
    }
    // thread 0 publishes this block's result
    if(threadIdx.x == 0){
        sum[blockIdx.x] = partial[0];
    }
    __syncthreads();
} | .file "tmpxft_0014bdc7_00000000-6_bplKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2073:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2073:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z13myCudaMalloc1m
.type _Z13myCudaMalloc1m, @function
_Z13myCudaMalloc1m:
.LFB2057:
.cfi_startproc
endbr64
subq $24, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rsi
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
call cudaMalloc@PLT
movq (%rsp), %rax
movq 8(%rsp), %rdx
subq %fs:40, %rdx
jne .L6
addq $24, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z13myCudaMalloc1m, .-_Z13myCudaMalloc1m
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "\n%s:\n"
.LC1:
.string "%10.5lf\t"
.LC2:
.string "\n"
.text
.globl _Z14displayMatrix2PKcPdii
.type _Z14displayMatrix2PKcPdii, @function
_Z14displayMatrix2PKcPdii:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rsi, 16(%rsp)
movl %edx, %ebx
movl %edx, 12(%rsp)
movl %ecx, %r15d
movq %rdi, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
testl %ebx, %ebx
jle .L7
movl $0, %r14d
movl $0, %r13d
movslq %r15d, %rax
movq %rax, 24(%rsp)
leaq .LC1(%rip), %r12
jmp .L9
.L11:
movslq %r14d, %rax
movq 16(%rsp), %rcx
leaq (%rcx,%rax,8), %rbx
movq 24(%rsp), %rdx
addq %rdx, %rax
leaq (%rcx,%rax,8), %rbp
.L10:
movsd (%rbx), %xmm0
movq %r12, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $8, %rbx
cmpq %rbp, %rbx
jne .L10
.L12:
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %r13d
addl %r15d, %r14d
cmpl %r13d, 12(%rsp)
je .L7
.L9:
testl %r15d, %r15d
jg .L11
jmp .L12
.L7:
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size _Z14displayMatrix2PKcPdii, .-_Z14displayMatrix2PKcPdii
.globl _Z11initializeWPdll
.type _Z11initializeWPdll, @function
_Z11initializeWPdll:
.LFB2059:
.cfi_startproc
endbr64
testq %rsi, %rsi
jle .L24
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $8, %rsp
.cfi_def_cfa_offset 64
movq %rsi, %r14
movq %rdx, %r12
leaq 0(,%rdx,8), %r15
movq %rdi, %rbp
movl $0, %r13d
jmp .L17
.L18:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
divsd .LC3(%rip), %xmm0
mulsd .LC4(%rip), %xmm0
subsd .LC5(%rip), %xmm0
movsd %xmm0, 0(%rbp,%rbx,8)
addq $1, %rbx
cmpq %rbx, %r12
jne .L18
.L19:
addq $1, %r13
addq %r15, %rbp
cmpq %r13, %r14
je .L15
.L17:
movl $0, %ebx
testq %r12, %r12
jg .L18
jmp .L19
.L15:
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L24:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
.cfi_restore 14
.cfi_restore 15
ret
.cfi_endproc
.LFE2059:
.size _Z11initializeWPdll, .-_Z11initializeWPdll
.globl _Z11initializeIPdll
.type _Z11initializeIPdll, @function
_Z11initializeIPdll:
.LFB2060:
.cfi_startproc
endbr64
testq %rsi, %rsi
jle .L27
leaq 0(,%rdx,8), %r10
movl $0, %r9d
jmp .L29
.L30:
movq %rax, %r8
shrq $63, %r8
leaq (%rax,%r8), %rcx
andl $1, %ecx
subq %r8, %rcx
pxor %xmm0, %xmm0
cvtsi2sdq %rcx, %xmm0
movsd %xmm0, (%rdi,%rax,8)
addq $1, %rax
cmpq %rax, %rdx
jne .L30
.L31:
addq $1, %r9
addq %r10, %rdi
cmpq %r9, %rsi
je .L27
.L29:
movl $0, %eax
testq %rdx, %rdx
jg .L30
jmp .L31
.L27:
ret
.cfi_endproc
.LFE2060:
.size _Z11initializeIPdll, .-_Z11initializeIPdll
.globl _Z11initializeOPdll
.type _Z11initializeOPdll, @function
_Z11initializeOPdll:
.LFB2061:
.cfi_startproc
endbr64
movq %rsi, %r8
testq %rsi, %rsi
jle .L35
leaq 0(,%rdx,8), %r9
leaq (%rdi,%r9), %rcx
movq %rdx, %r10
negq %r10
salq $3, %r10
movl $0, %esi
jmp .L37
.L39:
movq %rsi, %rdi
shrq $63, %rdi
leaq (%rsi,%rdi), %rax
andl $1, %eax
subq %rdi, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
leaq (%rcx,%r10), %rax
.L38:
movsd %xmm0, (%rax)
addq $8, %rax
cmpq %rcx, %rax
jne .L38
.L40:
addq $1, %rsi
addq %r9, %rcx
cmpq %rsi, %r8
je .L35
.L37:
testq %rdx, %rdx
jg .L39
jmp .L40
.L35:
ret
.cfi_endproc
.LFE2061:
.size _Z11initializeOPdll, .-_Z11initializeOPdll
.globl _Z42__device_stub__Z12MatMulKernelPdS_S_iiiibbPdS_S_iiiibb
.type _Z42__device_stub__Z12MatMulKernelPdS_S_iiiibbPdS_S_iiiibb, @function
_Z42__device_stub__Z12MatMulKernelPdS_S_iiiibbPdS_S_iiiibb:
.LFB2095:
.cfi_startproc
endbr64
subq $200, %rsp
.cfi_def_cfa_offset 208
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movl 216(%rsp), %eax
movb %al, 8(%rsp)
movl 224(%rsp), %eax
movb %al, 4(%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 8(%rsp), %rax
movq %rax, 168(%rsp)
leaq 4(%rsp), %rax
movq %rax, 176(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L46
.L42:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L47
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L46:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z12MatMulKernelPdS_S_iiiibb(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L42
.L47:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2095:
.size _Z42__device_stub__Z12MatMulKernelPdS_S_iiiibbPdS_S_iiiibb, .-_Z42__device_stub__Z12MatMulKernelPdS_S_iiiibbPdS_S_iiiibb
.globl _Z12MatMulKernelPdS_S_iiiibb
.type _Z12MatMulKernelPdS_S_iiiibb, @function
_Z12MatMulKernelPdS_S_iiiibb:
.LFB2096:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movzbl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
movzbl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 48
call _Z42__device_stub__Z12MatMulKernelPdS_S_iiiibbPdS_S_iiiibb
addq $40, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2096:
.size _Z12MatMulKernelPdS_S_iiiibb, .-_Z12MatMulKernelPdS_S_iiiibb
.globl _Z3mtmPdS_S_lll
.type _Z3mtmPdS_S_lll, @function
_Z3mtmPdS_S_lll:
.LFB2064:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rdi, %r12
movq %rsi, %r13
movq %rdx, %r14
movq %rcx, %rax
movq %r8, %rbp
movq %r9, %rbx
movl $32, 8(%rsp)
movl $32, 12(%rsp)
movl %ecx, %r15d
leal 30(%r9), %edx
movl %r9d, %ecx
subl $1, %ecx
cmovns %ecx, %edx
sarl $5, %edx
addl $1, %edx
movl %edx, 20(%rsp)
leal 30(%rax), %edx
subl $1, %eax
cmovs %edx, %eax
sarl $5, %eax
addl $1, %eax
movl %eax, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 8(%rsp), %rdx
movl $1, %ecx
movq 20(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L53
.L50:
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L53:
.cfi_restore_state
subq $8, %rsp
.cfi_def_cfa_offset 104
pushq $0
.cfi_def_cfa_offset 112
pushq $1
.cfi_def_cfa_offset 120
pushq %rbp
.cfi_def_cfa_offset 128
movl %ebx, %r9d
movl %ebp, %r8d
movl %r15d, %ecx
movq %r14, %rdx
movq %r13, %rsi
movq %r12, %rdi
call _Z42__device_stub__Z12MatMulKernelPdS_S_iiiibbPdS_S_iiiibb
addq $32, %rsp
.cfi_def_cfa_offset 96
jmp .L50
.cfi_endproc
.LFE2064:
.size _Z3mtmPdS_S_lll, .-_Z3mtmPdS_S_lll
.globl _Z3mmtPdS_S_lll
.type _Z3mmtPdS_S_lll, @function
_Z3mmtPdS_S_lll:
.LFB2063:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rdi, %r13
movq %rsi, %r15
movq %rdx, %r14
movq %rcx, %rbx
movq %r8, %rbp
movq %r9, %r12
movl $32, 8(%rsp)
movl $32, 12(%rsp)
leal 30(%r8), %eax
movl %r8d, %edx
subl $1, %edx
cmovns %edx, %eax
sarl $5, %eax
addl $1, %eax
movl %eax, 20(%rsp)
leal 30(%rcx), %eax
movl %ecx, %edx
subl $1, %edx
cmovns %edx, %eax
sarl $5, %eax
addl $1, %eax
movl %eax, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 8(%rsp), %rdx
movl $1, %ecx
movq 20(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L57
.L54:
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L57:
.cfi_restore_state
movl %r12d, %ecx
subq $8, %rsp
.cfi_def_cfa_offset 104
pushq $1
.cfi_def_cfa_offset 112
pushq $0
.cfi_def_cfa_offset 120
pushq %rbp
.cfi_def_cfa_offset 128
movl %r12d, %r9d
movl %ebx, %r8d
movq %r15, %rdx
movq %r14, %rsi
movq %r13, %rdi
call _Z42__device_stub__Z12MatMulKernelPdS_S_iiiibbPdS_S_iiiibb
addq $32, %rsp
.cfi_def_cfa_offset 96
jmp .L54
.cfi_endproc
.LFE2063:
.size _Z3mmtPdS_S_lll, .-_Z3mmtPdS_S_lll
.globl _Z2mmPdS_S_lll
.type _Z2mmPdS_S_lll, @function
_Z2mmPdS_S_lll:
.LFB2062:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rdi, %r13
movq %rsi, %r14
movq %rdx, %r15
movq %rcx, %rbx
movq %r8, %r12
movq %r9, %rbp
movl $32, 8(%rsp)
movl $32, 12(%rsp)
leal 30(%r9), %eax
movl %r9d, %edx
subl $1, %edx
cmovns %edx, %eax
sarl $5, %eax
addl $1, %eax
movl %eax, 20(%rsp)
leal 30(%rcx), %eax
movl %ecx, %edx
subl $1, %edx
cmovns %edx, %eax
sarl $5, %eax
addl $1, %eax
movl %eax, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 8(%rsp), %rdx
movl $1, %ecx
movq 20(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L61
.L58:
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L61:
.cfi_restore_state
movl %r12d, %ecx
subq $8, %rsp
.cfi_def_cfa_offset 104
pushq $0
.cfi_def_cfa_offset 112
pushq $0
.cfi_def_cfa_offset 120
pushq %r12
.cfi_def_cfa_offset 128
movl %ebp, %r9d
movl %ebx, %r8d
movq %r15, %rdx
movq %r14, %rsi
movq %r13, %rdi
call _Z42__device_stub__Z12MatMulKernelPdS_S_iiiibbPdS_S_iiiibb
addq $32, %rsp
.cfi_def_cfa_offset 96
jmp .L58
.cfi_endproc
.LFE2062:
.size _Z2mmPdS_S_lll, .-_Z2mmPdS_S_lll
.globl _Z32__device_stub__Z7cuMinusPdS_S_idPdS_S_id
.type _Z32__device_stub__Z7cuMinusPdS_S_idPdS_S_id, @function
_Z32__device_stub__Z7cuMinusPdS_S_idPdS_S_id:
.LFB2097:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movsd %xmm0, 8(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L66
.L62:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L67
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L66:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 184
pushq 56(%rsp)
.cfi_def_cfa_offset 192
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z7cuMinusPdS_S_id(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L62
.L67:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2097:
.size _Z32__device_stub__Z7cuMinusPdS_S_idPdS_S_id, .-_Z32__device_stub__Z7cuMinusPdS_S_idPdS_S_id
.globl _Z7cuMinusPdS_S_id
.type _Z7cuMinusPdS_S_id, @function
_Z7cuMinusPdS_S_id:
.LFB2098:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z7cuMinusPdS_S_idPdS_S_id
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2098:
.size _Z7cuMinusPdS_S_id, .-_Z7cuMinusPdS_S_id
.globl _Z5deltaPdS_lld
.type _Z5deltaPdS_lld, @function
_Z5deltaPdS_lld:
.LFB2070:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $48, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %rbp
movq %rsi, %r12
movsd %xmm0, 8(%rsp)
movq %rdx, %rbx
imulq %rcx, %rbx
movl $32, 36(%rsp)
movl $1, 40(%rsp)
testb $31, %bl
setne %al
movzbl %al, %eax
movq %rbx, %rdx
shrq $5, %rdx
addq %rdx, %rax
movl %eax, 24(%rsp)
movl $1, 28(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L73
.L70:
addq $48, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L73:
.cfi_restore_state
movsd 8(%rsp), %xmm0
movl %ebx, %ecx
movq %r12, %rdx
movq %rbp, %rsi
movq %rbp, %rdi
call _Z32__device_stub__Z7cuMinusPdS_S_idPdS_S_id
jmp .L70
.cfi_endproc
.LFE2070:
.size _Z5deltaPdS_lld, .-_Z5deltaPdS_lld
.globl _Z5errorPdS_S_ll
.type _Z5errorPdS_S_ll, @function
_Z5errorPdS_S_ll:
.LFB2067:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $40, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %rbp
movq %rsi, %r12
movq %rdx, %r13
movq %rcx, %rbx
imulq %r8, %rbx
movl $32, 20(%rsp)
movl $1, 24(%rsp)
testb $31, %bl
setne %al
movzbl %al, %eax
movq %rbx, %rdx
shrq $5, %rdx
addq %rdx, %rax
movl %eax, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L77
.L74:
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L77:
.cfi_restore_state
movsd .LC6(%rip), %xmm0
movl %ebx, %ecx
movq %r13, %rdx
movq %r12, %rsi
movq %rbp, %rdi
call _Z32__device_stub__Z7cuMinusPdS_S_idPdS_S_id
jmp .L74
.cfi_endproc
.LFE2067:
.size _Z5errorPdS_S_ll, .-_Z5errorPdS_S_ll
.globl _Z39__device_stub__Z13cuDivideByVecPdS_S_llPdS_S_ll
.type _Z39__device_stub__Z13cuDivideByVecPdS_S_llPdS_S_ll, @function
_Z39__device_stub__Z13cuDivideByVecPdS_S_llPdS_S_ll:
.LFB2099:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movq %r8, 8(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L82
.L78:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L83
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L82:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 184
pushq 56(%rsp)
.cfi_def_cfa_offset 192
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z13cuDivideByVecPdS_S_ll(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L78
.L83:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2099:
.size _Z39__device_stub__Z13cuDivideByVecPdS_S_llPdS_S_ll, .-_Z39__device_stub__Z13cuDivideByVecPdS_S_llPdS_S_ll
.globl _Z13cuDivideByVecPdS_S_ll
.type _Z13cuDivideByVecPdS_S_ll, @function
_Z13cuDivideByVecPdS_S_ll:
.LFB2100:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z39__device_stub__Z13cuDivideByVecPdS_S_llPdS_S_ll
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2100:
.size _Z13cuDivideByVecPdS_S_ll, .-_Z13cuDivideByVecPdS_S_ll
.globl _Z4probPdS_S_ll
.type _Z4probPdS_S_ll, @function
_Z4probPdS_S_ll:
.LFB2069:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $32, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %r13
movq %rsi, %r12
movq %rdx, %r14
movq %r8, %rbp
movq %rcx, %rbx
imulq %r8, %rbx
movl $32, 20(%rsp)
movl $1, 24(%rsp)
testb $31, %bl
setne %al
movzbl %al, %eax
movq %rbx, %rdx
shrq $5, %rdx
addq %rdx, %rax
movl %eax, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L89
.L86:
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L89:
.cfi_restore_state
movq %rbp, %r8
movq %rbx, %rcx
movq %r14, %rdx
movq %r13, %rsi
movq %r12, %rdi
call _Z39__device_stub__Z13cuDivideByVecPdS_S_llPdS_S_ll
jmp .L86
.cfi_endproc
.LFE2069:
.size _Z4probPdS_S_ll, .-_Z4probPdS_S_ll
.globl _Z38__device_stub__Z14cuGradientFuncPdS_llPdS_ll
.type _Z38__device_stub__Z14cuGradientFuncPdS_llPdS_ll, @function
_Z38__device_stub__Z14cuGradientFuncPdS_llPdS_ll:
.LFB2101:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %rcx, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L94
.L90:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L95
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L94:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z14cuGradientFuncPdS_ll(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L90
.L95:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2101:
.size _Z38__device_stub__Z14cuGradientFuncPdS_llPdS_ll, .-_Z38__device_stub__Z14cuGradientFuncPdS_llPdS_ll
# Host-side entry point for the cuGradientFunc<<<...>>> kernel launch
# (compiler-generated): stack re-alignment plus a call into the device
# stub that performs the actual cudaLaunchKernel.
.globl _Z14cuGradientFuncPdS_ll
.type _Z14cuGradientFuncPdS_ll, @function
_Z14cuGradientFuncPdS_ll:
.LFB2102:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z14cuGradientFuncPdS_llPdS_ll
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2102:
.size _Z14cuGradientFuncPdS_ll, .-_Z14cuGradientFuncPdS_ll
.globl _Z13gradient_funcPdS_ll
.type _Z13gradient_funcPdS_ll, @function
_Z13gradient_funcPdS_ll:
.LFB2066:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $40, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %r12
movq %rsi, %r13
movq %rcx, %rbp
movq %rdx, %rbx
imulq %rcx, %rbx
movl $32, 20(%rsp)
movl $1, 24(%rsp)
testb $31, %bl
setne %al
movzbl %al, %eax
movq %rbx, %rdx
shrq $5, %rdx
addq %rdx, %rax
movl %eax, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L101
.L98:
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L101:
.cfi_restore_state
movq %rbp, %rcx
movq %rbx, %rdx
movq %r13, %rsi
movq %r12, %rdi
call _Z38__device_stub__Z14cuGradientFuncPdS_llPdS_ll
jmp .L98
.cfi_endproc
.LFE2066:
.size _Z13gradient_funcPdS_ll, .-_Z13gradient_funcPdS_ll
.globl _Z30__device_stub__Z6cuFuncPdS_lllPdS_lll
.type _Z30__device_stub__Z6cuFuncPdS_lllPdS_lll, @function
_Z30__device_stub__Z6cuFuncPdS_lllPdS_lll:
.LFB2103:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movq %r8, 8(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L106
.L102:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L107
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L106:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 184
pushq 56(%rsp)
.cfi_def_cfa_offset 192
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z6cuFuncPdS_lll(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L102
.L107:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2103:
.size _Z30__device_stub__Z6cuFuncPdS_lllPdS_lll, .-_Z30__device_stub__Z6cuFuncPdS_lllPdS_lll
# Host-side entry point for the cuFunc<<<...>>> kernel launch
# (compiler-generated): stack re-alignment plus a call into the device
# stub that performs the actual cudaLaunchKernel.
.globl _Z6cuFuncPdS_lll
.type _Z6cuFuncPdS_lll, @function
_Z6cuFuncPdS_lll:
.LFB2104:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z6cuFuncPdS_lllPdS_lll
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2104:
.size _Z6cuFuncPdS_lll, .-_Z6cuFuncPdS_lll
.globl _Z4funcPdS_lll
.type _Z4funcPdS_lll, @function
_Z4funcPdS_lll:
.LFB2065:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $32, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %r12
movq %rsi, %r13
movq %rcx, %rbp
movq %r8, %r14
movq %rdx, %rbx
imulq %rcx, %rbx
movl $32, 20(%rsp)
movl $1, 24(%rsp)
testb $31, %bl
setne %al
movzbl %al, %eax
movq %rbx, %rdx
shrq $5, %rdx
addq %rdx, %rax
movl %eax, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L113
.L110:
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L113:
.cfi_restore_state
movq %r14, %r8
movq %rbp, %rcx
movq %rbx, %rdx
movq %r13, %rsi
movq %r12, %rdi
call _Z30__device_stub__Z6cuFuncPdS_lllPdS_lll
jmp .L110
.cfi_endproc
.LFE2065:
.size _Z4funcPdS_lll, .-_Z4funcPdS_lll
.globl _Z32__device_stub__Z6cu_sumPKdPdS1_iPKdPdS1_i
.type _Z32__device_stub__Z6cu_sumPKdPdS1_iPKdPdS1_i, @function
_Z32__device_stub__Z6cu_sumPKdPdS1_iPKdPdS1_i:
.LFB2105:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L118
.L114:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L119
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L118:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6cu_sumPKdPdS1_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L114
.L119:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2105:
.size _Z32__device_stub__Z6cu_sumPKdPdS1_iPKdPdS1_i, .-_Z32__device_stub__Z6cu_sumPKdPdS1_iPKdPdS1_i
# Host-side entry point for the cu_sum<<<...>>> kernel launch
# (compiler-generated): stack re-alignment plus a call into the device
# stub that performs the actual cudaLaunchKernel.
.globl _Z6cu_sumPKdPdS1_i
.type _Z6cu_sumPKdPdS1_i, @function
_Z6cu_sumPKdPdS1_i:
.LFB2106:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z6cu_sumPKdPdS1_iPKdPdS1_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2106:
.size _Z6cu_sumPKdPdS1_i, .-_Z6cu_sumPKdPdS1_i
.globl _Z9reductionPdS_ll
.type _Z9reductionPdS_ll, @function
_Z9reductionPdS_ll:
.LFB2068:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $136, %rsp
.cfi_def_cfa_offset 192
movq %rdi, %rbx
movq %rsi, 64(%rsp)
movq %rdx, %rsi
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
movl %ecx, 52(%rsp)
movslq %ecx, %rbp
testb $31, %cl
setne %dl
movzbl %dl, %edx
movq %rbp, %rax
shrq $5, %rax
leaq (%rdx,%rax), %r15
movq %r15, 56(%rsp)
movq %rsi, 40(%rsp)
leaq 0(,%rsi,8), %r12
movq %r12, %rdi
call malloc@PLT
movq %rax, 32(%rsp)
leaq 0(,%rbp,8), %rax
movq %rax, 8(%rsp)
movq %rax, %rdi
call _Z13myCudaMalloc1m
movq %rax, %r14
movl $256, %edi
call _Z13myCudaMalloc1m
movq %rax, %r13
leaq 0(,%r15,8), %rdi
call _Z13myCudaMalloc1m
movq %rax, %r15
cmpq $0, 40(%rsp)
jle .L123
movq %rbx, 16(%rsp)
movl $0, %ebp
movq %r12, 72(%rsp)
jmp .L127
.L135:
movl 48(%rsp), %ecx
movq %r13, %rdx
movq %r15, %rsi
movq %r14, %rdi
call _Z32__device_stub__Z6cu_sumPKdPdS1_iPKdPdS1_i
jmp .L125
.L136:
movq 24(%rsp), %rbp
movq $0x000000000, 104(%rsp)
leaq 104(%rsp), %rdi
movl $2, %ecx
movl $8, %edx
movq %r15, %rsi
call cudaMemcpy@PLT
movsd 104(%rsp), %xmm0
movq 32(%rsp), %rax
movsd %xmm0, (%rax,%rbp,8)
addq $1, %rbp
movq 8(%rsp), %rcx
addq %rcx, 16(%rsp)
cmpq %rbp, 40(%rsp)
je .L133
.L127:
movl 56(%rsp), %ebx
movl $3, %ecx
movq 8(%rsp), %rdx
movq 16(%rsp), %rsi
movq %r14, %rdi
call cudaMemcpy@PLT
movl 52(%rsp), %eax
movl %eax, 48(%rsp)
movl $32, %r12d
movq %rbp, 24(%rsp)
jmp .L124
.L133:
movq 72(%rsp), %r12
.L123:
movl $1, %ecx
movq %r12, %rdx
movq 32(%rsp), %rbx
movq %rbx, %rsi
movq 64(%rsp), %rdi
call cudaMemcpy@PLT
movq %r13, %rdi
call cudaFree@PLT
movq %r14, %rdi
call cudaFree@PLT
movq %r15, %rdi
call cudaFree@PLT
movq %rbx, %rdi
call free@PLT
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L134
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L128:
.cfi_restore_state
leal 31(%rbx), %ebp
testl %ebx, %ebx
cmovns %ebx, %ebp
sarl $5, %ebp
movl %ebx, %eax
andl $31, %eax
cmpl $1, %eax
sbbl $-1, %ebp
salq $3, %rdx
movl $3, %ecx
movq %r15, %rsi
movq %r14, %rdi
call cudaMemcpy@PLT
movl $32, %r12d
.L129:
movl %ebx, 48(%rsp)
movl %ebp, %ebx
.L124:
movslq %r12d, %rdx
salq $3, %rdx
movl $0, %esi
movq %r13, %rdi
call cudaMemset@PLT
movl %r12d, 104(%rsp)
movl $1, 108(%rsp)
movl $1, 112(%rsp)
movl %ebx, 92(%rsp)
movl $1, 96(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 104(%rsp), %rdx
movl $1, %ecx
movq 92(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L135
.L125:
call cudaDeviceSynchronize@PLT
cmpl $1, %ebx
je .L136
movslq %ebx, %rdx
cmpq $32, %rdx
ja .L128
salq $3, %rdx
movl $3, %ecx
movq %r15, %rsi
movq %r14, %rdi
call cudaMemcpy@PLT
movl %ebx, %r12d
movl $1, %ebp
jmp .L129
.L134:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2068:
.size _Z9reductionPdS_ll, .-_Z9reductionPdS_ll
.section .rodata.str1.1
.LC8:
.string "_Z6cu_sumPKdPdS1_i"
.LC9:
.string "_Z6cuFuncPdS_lll"
.LC10:
.string "_Z14cuGradientFuncPdS_ll"
.LC11:
.string "_Z13cuDivideByVecPdS_S_ll"
.LC12:
.string "_Z7cuMinusPdS_S_id"
.LC13:
.string "_Z12MatMulKernelPdS_S_iiiibb"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2108:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z6cu_sumPKdPdS1_i(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq _Z6cuFuncPdS_lll(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _Z14cuGradientFuncPdS_ll(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _Z13cuDivideByVecPdS_S_ll(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _Z7cuMinusPdS_S_id(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _Z12MatMulKernelPdS_S_iiiibb(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2108:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC3:
.long -4194304
.long 1105199103
.align 8
.LC4:
.long -1717986918
.long 1070176665
.align 8
.LC5:
.long -1717986918
.long 1069128089
.align 8
.LC6:
.long 0
.long 1072693248
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include<stdio.h>
#define Y1(i,j) Y1[((i)*(A))+(j)]
#define Yf(i,j) Yf[((i)*(B1))+(j)]
#define Y2(i,j) Y2[((i)*(C))+(j)]
#define Z1(i,j) Z1[((i)*(C))+(j)]
#define X1(i,j) X1[((i)*(B))+(j)]
#define X2(i,j) X2[((i)*(C))+(j)]
#define Y(i,j) Y[((i)*(B))+(j)]
#define Z(i,j) Z[((i)*(B))+(j)]
//#define I(i,j) I[((i)*(A))+(j)]
#define foo(a,b) b?tanh(a):exp(a)
#define FOOTPRINT_SIZE 64
#define BLOCK_SIZE 32
#define THREADS_PER_BLOCK 32 //for Pointwise calculations
void *myCudaMalloc1(size_t len)
{
    /* Allocate `len` bytes of device memory and return the device pointer.
     * Fix: `p` was returned uninitialized when cudaMalloc failed; it is now
     * NULL-initialized and the status is checked, so callers get NULL on
     * failure instead of an indeterminate pointer. */
    void *p = NULL;
    if (cudaMalloc(&p, len) != cudaSuccess) {
        return NULL;
    }
    return p;
}
void displayMatrix2 (const char *label, double *m, int rows, int cols)
{
    /* Pretty-print a row-major rows x cols matrix to stdout, preceded by
     * its label. */
    printf ("\n%s:\n", label);
    for (int r = 0; r < rows; ++r)
    {
        for (int c = 0; c < cols; ++c)
        {
            printf("%10.5lf\t", m[(r * cols) + c]);
        }
        printf ("\n");
    }
}
__global__ void MatMulKernel(double* C, double* A, double* B, int A_width, int A_height, int B_width, int B_height, bool transA, bool transB);
//__global__ void MatMulKernel01(double* C, double* A, double* B, int A_width, int A_height, int B_width, int B_height, bool transA, bool transB);
__global__ void cuMinus(double *C, double *A, double *B, int n, double delta=1);
__global__ void cuGradientFunc(double *A, double *B, long n, long n_cols);
__global__ void cuFunc(double *A, double *B, long n, long n_cols, long val);
__global__ void cuDivideByVec(double *C, double *A, double *B, long n, long n_cols);
__global__ void cu_sum(const double* src, double* sum, double *global_mem, const int n);
//---------------------------Helper Host Functions------------------------------------------------------------------------------------------------
void initializeW(double* X1, long A, long B){
    /* Fill the A x B weight matrix with uniform pseudo-random values in
     * [-0.1, 0.1]: rand()/RAND_MAX gives [0,1], scaled to a 0.2-wide band
     * centered on zero. (X1(i,j) macro expanded to direct indexing.) */
    for (long row = 0; row < A; ++row) {
        for (long col = 0; col < B; ++col) {
            double u = (double)rand() / (double)RAND_MAX;
            X1[row * B + col] = u * 0.2 - 0.1;
        }
    }
}
void initializeI(double* X1, long A, long B){
    /* Fill the A x B input matrix with an alternating 0/1 column pattern
     * (even columns 0, odd columns 1). Macro X1(i,j) expanded inline. */
    for (long row = 0; row < A; ++row)
        for (long col = 0; col < B; ++col)
            X1[row * B + col] = col % 2;
}
void initializeO(double* X1, long A, long B){
    /* Fill the A x B output matrix with an alternating 0/1 row pattern
     * (even rows 0, odd rows 1). Macro X1(i,j) expanded inline. */
    for (long row = 0; row < A; ++row)
        for (long col = 0; col < B; ++col)
            X1[row * B + col] = row % 2;
}
void mm(double* X2, double* Y, double* Z1, long A, long B, long C){
    /* Device matrix-matrix product X2 (A x C) = Y (A x B) * Z1 (B x C);
     * neither operand transposed. One BLOCK_SIZE x BLOCK_SIZE thread block
     * per output tile, grid sized by ceil-division of the output dims. */
    const bool transA = false, transB = false;
    const int A_width  = B;
    const int A_height = A;
    const int B_width  = C;
    const int B_height = B;
    int grid_x = transB ? ((B_height - 1) / BLOCK_SIZE + 1)
                        : ((B_width  - 1) / BLOCK_SIZE + 1);
    int grid_y = transA ? ((A_width  - 1) / BLOCK_SIZE + 1)
                        : ((A_height - 1) / BLOCK_SIZE + 1);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(grid_x, grid_y);
    MatMulKernel<<<dimGrid,dimBlock>>>(X2,Y,Z1,A_width,A_height,B_width,B_height, transA,transB);
}
void mmt(double* X1, double* Y2, double* Z1, long A, long B, long C){
    /* Device matrix x transposed-matrix product:
     * X1 (A x B) = Z1 (A x C) * Y2^T, with Y2 stored B x C.
     * transB selects the B_height-based grid width. */
    const bool transA = false, transB = true;
    const int A_width  = C;
    const int A_height = A;
    const int B_width  = C;
    const int B_height = B;
    int grid_x = transB ? ((B_height - 1) / BLOCK_SIZE + 1)
                        : ((B_width  - 1) / BLOCK_SIZE + 1);
    int grid_y = transA ? ((A_width  - 1) / BLOCK_SIZE + 1)
                        : ((A_height - 1) / BLOCK_SIZE + 1);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(grid_x, grid_y);
    MatMulKernel<<<dimGrid,dimBlock>>>(X1,Z1,Y2,A_width,A_height,B_width,B_height, transA,transB);
}
void mtm(double* X2, double* Y1, double* Z1, long A, long B, long C){
    /* Device transposed-matrix x matrix product:
     * X2 (A x C) = Y1^T * Z1, with Y1 stored B x A and Z1 stored B x C.
     * transA selects the A_width-based grid height. */
    const bool transA = true, transB = false;
    const int A_width  = A;
    const int A_height = B;
    const int B_width  = C;
    const int B_height = B;
    int grid_x = transB ? ((B_height - 1) / BLOCK_SIZE + 1)
                        : ((B_width  - 1) / BLOCK_SIZE + 1);
    int grid_y = transA ? ((A_width  - 1) / BLOCK_SIZE + 1)
                        : ((A_height - 1) / BLOCK_SIZE + 1);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(grid_x, grid_y);
    MatMulKernel<<<dimGrid,dimBlock>>>(X2,Y1,Z1,A_width,A_height,B_width,B_height, transA,transB);
}
void func(double* X1, double* Yf, long A, long B1, long val){
    /* Point-wise X1(i, j+val) = foo(Yf(i,j), val) over the A x B1 matrix Yf,
     * i.e. tanh when val != 0 and exp when val == 0 (see the foo macro);
     * X1 has B1+val columns, so the result lands shifted by `val` columns.
     * Fixes: removed the unused local `long B = B1 + val;`, and added an
     * empty-input guard — a <<<0, ...>>> launch is a CUDA configuration
     * error. */
    long len = A * B1;
    if (len <= 0) return;
    const size_t block_size = THREADS_PER_BLOCK;
    const size_t num_blocks = (len / block_size) + ((len % block_size) ? 1 : 0);
    cuFunc<<<num_blocks, block_size>>>(X1, Yf, len, B1, val);
}
void gradient_func(double* X1, double* Yf, long A, long B){
    /* Point-wise tanh-gradient update over an A x B matrix:
     *   X1(i,j) = Yf(i, j+1) * (1 - tanh(X1(i,j))^2)
     * (Yf presumably has B+1 columns; the kernel's tid+1 + tid/n_cols
     * indexing skips Yf's leading column per row.)
     * Fixes: removed the unused local `long B1 = B + 1;`, and added an
     * empty-input guard — a <<<0, ...>>> launch is a CUDA configuration
     * error. */
    long len = A * B;
    if (len <= 0) return;
    const size_t block_size = THREADS_PER_BLOCK;
    const size_t num_blocks = (len / block_size) + ((len % block_size) ? 1 : 0);
    cuGradientFunc<<<num_blocks, block_size>>>(X1, Yf, len, B);
}
void error(double* X1, double* Y, double* Z, long A, long B){
    /* Element-wise difference X1 = Y - Z over an A x B matrix, computed on
     * the device via the cuMinus kernel (default delta == 1). */
    const long len = A * B;
    const size_t block_size = THREADS_PER_BLOCK;
    size_t num_blocks = len / block_size;
    if (len % block_size) {
        ++num_blocks;
    }
    cuMinus<<<num_blocks, block_size>>>(X1, Y, Z, len);
}
void reduction(double* Y, double* X1, long A, long B){
/*Performs the summation of probabilities*/
// Row-wise sum of the device matrix Y (A rows x B cols) into the device
// vector X1: X1[i] = sum over j of Y(i,j). Each row is reduced on the GPU
// by repeatedly launching cu_sum until one partial sum remains; results are
// staged in HostX and copied back to X1 in a single transfer at the end.
/*long i,j;
for (i=0; i<A; i++)
{
X1[i]=0;
for (j=0; j<B; j++)
X1[i] += Y(i,j);
}*/
int len = B;
const size_t block_size = THREADS_PER_BLOCK;
const size_t num_blocks = (len / block_size) + ((len % block_size) ? 1 : 0);
double * HostX = (double * ) malloc(A * sizeof(double));
double *data;
double *d_partial_sums;
double *global_mem;
// Device scratch: a working copy of the current row, one slot per block
// for the partial sums, and cu_sum's per-thread staging buffer
// (block_size doubles — every later memset uses tmp_block_size <= block_size).
data = (double * ) myCudaMalloc1(sizeof(double) * len);
global_mem = (double * ) myCudaMalloc1( sizeof(double)* block_size);
d_partial_sums = (double * ) myCudaMalloc1( sizeof(double)* num_blocks);
for(int i = 0; i < A; ++i){
int tmp_block_size = block_size;
int tmp_num_blocks = num_blocks;
int data_len = len;
cudaMemcpy(data, Y + i * len, data_len * sizeof(double), cudaMemcpyDeviceToDevice);
while(true){
// NOTE(review): global_mem is one shared block_size-sized buffer, but every
// launched block of cu_sum writes global_mem[threadIdx.x]; when
// tmp_num_blocks > 1 (i.e. B > THREADS_PER_BLOCK) the blocks race on it —
// verify results for wide rows.
cudaMemset(global_mem, 0, sizeof(double) * tmp_block_size);
cu_sum<<<tmp_num_blocks, tmp_block_size>>>(data, d_partial_sums, global_mem, data_len);
cudaDeviceSynchronize();
data_len = tmp_num_blocks;
if(tmp_num_blocks == 1){
// copy the result back to the host
double host_res = 0;
cudaMemcpy(&host_res, d_partial_sums, sizeof(double), cudaMemcpyDeviceToHost);
HostX[i] = host_res;
break;
}else if(tmp_num_blocks <= block_size){
// Few enough partials left for a single block; its size becomes data_len.
// NOTE(review): cu_sum's halving loop assumes a power-of-two block size —
// a non-power-of-two data_len here drops elements; confirm.
tmp_block_size = data_len;
tmp_num_blocks = 1;
cudaMemcpy(data, d_partial_sums, data_len * sizeof(double), cudaMemcpyDeviceToDevice);
}else{
// Still too many partials: feed them back in as the next input round.
tmp_block_size = THREADS_PER_BLOCK;
tmp_num_blocks = (data_len / tmp_block_size) + ((data_len % tmp_block_size) ? 1 : 0);
cudaMemcpy(data, d_partial_sums, data_len * sizeof(double), cudaMemcpyDeviceToDevice);
}
}
}
cudaMemcpy(X1, HostX, A * sizeof(double), cudaMemcpyHostToDevice); //copy back to the device
cudaFree(global_mem);
cudaFree(data);
cudaFree(d_partial_sums);
free(HostX);
//displayMatrix2("HostX", HostX, A, 1);
}
void prob(double* Y,double* Z, double* X1, long A, long B){
    /* Normalized exponential: Z(i,j) = Y(i,j) / X1[i] over an A x B
     * matrix, where X1 holds one normalizer per row. Runs on the device
     * via cuDivideByVec. */
    const long len = A * B;
    const size_t block_size = THREADS_PER_BLOCK;
    size_t num_blocks = len / block_size;
    if (len % block_size) {
        ++num_blocks;
    }
    cuDivideByVec<<<num_blocks, block_size>>>(Z, Y, X1, len, B);
}
void delta(double* Z, double* Y, long A, long B, double C){
    /* Weight update Z(i,j) -= C * Y(i,j) over an A x B matrix, expressed
     * on the device as cuMinus(Z, Z, Y, len, C). */
    const long len = A * B;
    const size_t block_size = THREADS_PER_BLOCK;
    size_t num_blocks = len / block_size;
    if (len % block_size) {
        ++num_blocks;
    }
    cuMinus<<<num_blocks, block_size>>>(Z, Z, Y, len, C);
}
//----------------Device kernels---------------------------------
__global__ void MatMulKernel(double* C, double* A, double* B, int A_width, int A_height, int B_width, int B_height, bool transA, bool transB)
{
    /* Tiled matrix multiply C = op(A) * op(B) for double matrices, where
     * op(X) = X^T when the corresponding trans flag is set.
     * Launch contract: one BLOCK_SIZE x BLOCK_SIZE thread block per output
     * tile (grid sized by the host wrappers mm/mmt/mtm).
     * Fixes:
     *  - the shared tiles and the accumulator were `float`, silently
     *    truncating the double inputs; they are now `double`;
     *  - the tile-count ternary `transA?A_height:A_width - 1` bound only
     *    subtracted 1 in the non-transposed branch (precedence bug); both
     *    branches now use the usual ceil-div tile count;
     *  - inner tile dimension padded (+1) to avoid shared-memory bank
     *    conflicts on the column accesses in the transposed paths. */
    int thread_row = threadIdx.y;
    int thread_col = threadIdx.x;
    int block_row = blockIdx.y;
    int block_col = blockIdx.x;
    int Row = block_row * BLOCK_SIZE + thread_row,
        Col = block_col * BLOCK_SIZE + thread_col;
    int C_width  = transB ? B_height : B_width;
    int C_height = transA ? A_width  : A_height;
    double Cvalue = 0;
    // Shared (inner) dimension of the product; tiles stride across it.
    int inner = transA ? A_height : A_width;
    for (int m = 0; m < (inner - 1) / BLOCK_SIZE + 1; ++m) {
        __shared__ double shared_A[BLOCK_SIZE][BLOCK_SIZE + 1];
        __shared__ double shared_B[BLOCK_SIZE][BLOCK_SIZE + 1];
        // Stage one tile of op(A); out-of-range slots are zero-filled so
        // the dot-product loop below needs no bounds checks.
        if (transA) {
            if (BLOCK_SIZE * m + thread_col < A_height && Row < A_width) {
                shared_A[thread_row][thread_col] = A[(BLOCK_SIZE * m + thread_col) * A_width + Row];
            } else {
                shared_A[thread_row][thread_col] = 0;
            }
        } else {
            if (Row < A_height && BLOCK_SIZE * m + thread_col < A_width) {
                shared_A[thread_row][thread_col] = A[Row * A_width + BLOCK_SIZE * m + thread_col];
            } else {
                shared_A[thread_row][thread_col] = 0;
            }
        }
        // Stage one tile of op(B), zero-filled the same way.
        if (transB) {
            if (Col < B_height && BLOCK_SIZE * m + thread_row < B_width) {
                shared_B[thread_row][thread_col] = B[Col * B_width + BLOCK_SIZE * m + thread_row];
            } else {
                shared_B[thread_row][thread_col] = 0;
            }
        } else {
            if (BLOCK_SIZE * m + thread_row < B_height && Col < B_width) {
                shared_B[thread_row][thread_col] = B[(BLOCK_SIZE * m + thread_row) * B_width + Col];
            } else {
                shared_B[thread_row][thread_col] = 0;
            }
        }
        // All tile elements must be staged before any thread reads them.
        __syncthreads();
        #pragma unroll
        for (int e = 0; e < BLOCK_SIZE; ++e)
            Cvalue += shared_A[thread_row][e] * shared_B[e][thread_col];
        // Keep the tiles alive until every thread finished this round.
        __syncthreads();
    }
    if (Row < C_height && Col < C_width) {
        C[Row * C_width + Col] = Cvalue;
    }
}
__global__ void cuMinus(double *C, double *A, double *B, int n, double delta){
    /* Grid-stride element-wise subtraction: C = A - delta*B; when delta
     * is exactly 1 (the declared default) the multiply is skipped. */
    const int step = blockDim.x * gridDim.x;
    const bool scaled = (delta != 1);
    for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < n; idx += step) {
        if (scaled) {
            C[idx] = A[idx] - B[idx] * delta;
        } else {
            C[idx] = A[idx] - B[idx];
        }
    }
}
__global__ void cuDivideByVec(double *C, double *A, double *B, long n, long n_cols){
    /* Grid-stride C[t] = A[t] / B[t / n_cols]: each element of an
     * (n/n_cols)-row matrix is divided by its row's entry in vector B.
     * Fix: the element index and stride were `int` while n is `long`;
     * for n > INT_MAX the index overflowed (undefined behavior) before
     * reaching n. Index arithmetic is now done in `long`. */
    long tid = threadIdx.x + (long)blockIdx.x * blockDim.x;
    long stride = (long)blockDim.x * gridDim.x;
    while(tid < n){
        C[tid] = A[tid] / B[tid/n_cols];
        tid += stride;
    }
}
__global__ void cuGradientFunc(double *A, double *B, long n, long n_cols){
    /* Grid-stride tanh-gradient: A[t] = (1 - tanh(A[t])^2) * B[t+1 + t/n_cols].
     * The +1 + t/n_cols offset advances one extra slot per row — presumably
     * B has n_cols + 1 columns and the leading one is skipped (matches the
     * host-side gradient_func comment); confirm against callers.
     * Fix: the element index and stride were `int` while n is `long`;
     * for n > INT_MAX the index overflowed (undefined behavior). Index
     * arithmetic is now done in `long`. */
    long tid = threadIdx.x + (long)blockIdx.x * blockDim.x;
    long stride = (long)blockDim.x * gridDim.x;
    while(tid < n){
        A[tid] = (1 - pow (tanh (A[tid]), 2)) * B[tid+1 + (tid)/n_cols];
        tid += stride;
    }
}
__global__ void cuFunc(double *A, double *B, long n, long n_cols, long val){
    /* Grid-stride A[t + val*(val + t/n_cols)] = foo(B[t], val), i.e.
     * tanh(B[t]) when val != 0 and exp(B[t]) when val == 0 (foo macro);
     * the output index shifts by `val` extra slots per row of B.
     * Fix: the element index and stride were `int` while n is `long`;
     * for n > INT_MAX the index overflowed (undefined behavior). Index
     * arithmetic is now done in `long`. */
    long tid = threadIdx.x + (long)blockIdx.x * blockDim.x;
    long stride = (long)blockDim.x * gridDim.x;
    while(tid < n){
        A[tid+ val*(val+tid/n_cols)] = foo(B[tid],val);
        tid += stride;
    }
}
__global__ void cu_sum(const double* src, double* sum, double *global_mem, const int n){
// Per-block tree reduction: each block sums blockDim.x elements of src
// into sum[blockIdx.x], using global_mem (a caller-provided GLOBAL buffer
// of blockDim.x doubles) as its staging area.
// NOTE(review): despite the original comment below, global_mem is global
// memory, not __shared__ — and it is one buffer indexed by threadIdx.x
// only, so every block of a multi-block launch writes the same slots;
// gridDim.x > 1 races on it. Verify callers (see reduction()).
// NOTE(review): the halving loop assumes blockDim.x is a power of two;
// for other block sizes the odd trailing element is never added in.
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
// stage this thread's input element (zero when past the end of src)
//for(int i = 0 ; i < n; i++)printf("%lf ",src[i]);
// printf("\n");
double x = 0;
if(tid < n){
x = src[tid];
}
global_mem[threadIdx.x] = x;
__syncthreads();
// contiguous range pattern: halve the active range each step, adding the
// upper half onto the lower half
for(int offset = blockDim.x / 2; offset > 0; offset >>= 1){
if(threadIdx.x < offset){
// add a partial sum upstream to our own
global_mem[threadIdx.x] += global_mem[threadIdx.x + offset];
}
// wait until all threads in the block have
// updated their partial sums
__syncthreads();
}
// thread 0 writes the final result
if(threadIdx.x == 0){
sum[blockIdx.x] = global_mem[0];
}
__syncthreads();
}
#include<stdio.h>
#define Y1(i,j) Y1[((i)*(A))+(j)]
#define Yf(i,j) Yf[((i)*(B1))+(j)]
#define Y2(i,j) Y2[((i)*(C))+(j)]
#define Z1(i,j) Z1[((i)*(C))+(j)]
#define X1(i,j) X1[((i)*(B))+(j)]
#define X2(i,j) X2[((i)*(C))+(j)]
#define Y(i,j) Y[((i)*(B))+(j)]
#define Z(i,j) Z[((i)*(B))+(j)]
//#define I(i,j) I[((i)*(A))+(j)]
#define foo(a,b) b?tanh(a):exp(a)
#define FOOTPRINT_SIZE 64
#define BLOCK_SIZE 32
#define THREADS_PER_BLOCK 32 //for Pointwise calculations
void *myCudaMalloc1(size_t len)
{
void *p;
hipMalloc(&p, len);
return p;
}
void displayMatrix2 (const char *label, double *m, int rows, int cols)
{
printf ("\n%s:\n", label);
for(int i = 0; i < rows; ++i )
{
for(int j = 0; j < cols; ++j )
printf("%10.5lf\t",m[(i*cols)+j]);
printf ("\n");
}
}
__global__ void MatMulKernel(double* C, double* A, double* B, int A_width, int A_height, int B_width, int B_height, bool transA, bool transB);
//__global__ void MatMulKernel01(double* C, double* A, double* B, int A_width, int A_height, int B_width, int B_height, bool transA, bool transB);
__global__ void cuMinus(double *C, double *A, double *B, int n, double delta=1);
__global__ void cuGradientFunc(double *A, double *B, long n, long n_cols);
__global__ void cuFunc(double *A, double *B, long n, long n_cols, long val);
__global__ void cuDivideByVec(double *C, double *A, double *B, long n, long n_cols);
__global__ void cu_sum(const double* src, double* sum, double *global_mem, const int n);
//---------------------------Helper Host Functions------------------------------------------------------------------------------------------------
void initializeW(double* X1, long A, long B){
/*Initializes the weights*/
long i,j;
for (i=0; i<A;i++)
for (j=0; j<B;j++)
X1(i,j) = ((double)rand() / (double)RAND_MAX) * 0.2 - 0.1;
}
void initializeI(double* X1, long A, long B){
/*Initializes the inputs*/
long i,j;
for (i=0; i<A;i++)
for (j=0; j<B;j++)
X1(i,j) = j%2;
}
void initializeO(double* X1, long A, long B){
/*Initializes the outputs*/
long i,j;
for (i=0; i<A;i++)
for (j=0; j<B;j++)
X1(i,j) = i%2;
}
void mm(double* X2, double* Y, double* Z1, long A, long B, long C){
/*Performs Matrix-Matrix Mulitplication*/
/*
long i,j,k;
for (i=0; i<A; i++)
for (j=0; j<B; j++)
for(k=0; k<C; k++)
{
if(j==0) X2(i,k)=0;
X2(i,k) += Y(i,j) * Z1(j,k);
}
*/
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
bool transA = false, transB = false;
int A_width = B;
int A_height = A;
int B_width = C;
int B_height = B;
//printf("%dx%d %dx%d\n", A_height, A_width, B_height, B_width);
int grid_size_x = transB? ((B_height-1)/BLOCK_SIZE + 1) : ((B_width-1)/BLOCK_SIZE + 1);
int grid_size_y = transA? ((A_width-1)/BLOCK_SIZE + 1) : ((A_height-1)/BLOCK_SIZE + 1);
dim3 dimGrid( grid_size_x, grid_size_y);
MatMulKernel<<<dimGrid,dimBlock>>>(X2,Y,Z1,A_width,A_height,B_width,B_height, transA,transB);
}
void mmt(double* X1, double* Y2, double* Z1, long A, long B, long C){
/*Performs Matrix-Transposed Matrix Mulitplication*/
/*
long i,j,k;
for (i=0; i<A; i++)
for (j=0; j<B; j++)
{
X1(i,j)=0;
for(k=0; k<C; k++)
X1(i,j) += Z1(i,k) * Y2(j,k) ; //Z1(i,k)
}
*/
//printf("%d %d %d\n",A,B,C);
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
bool transA = false, transB = true;
int A_width = C;
int A_height = A;
int B_width = C;
int B_height = B;
//printf("%dx%d %dx%d\n", A_height, A_width, B_height, B_width);
int grid_size_x = transB? ((B_height-1)/BLOCK_SIZE + 1) : ((B_width-1)/BLOCK_SIZE + 1);
int grid_size_y = transA? ((A_width-1)/BLOCK_SIZE + 1) : ((A_height-1)/BLOCK_SIZE + 1);
dim3 dimGrid( grid_size_x, grid_size_y);
MatMulKernel<<<dimGrid,dimBlock>>>(X1,Z1,Y2,A_width,A_height,B_width,B_height, transA,transB);
}
void mtm(double* X2, double* Y1, double* Z1, long A, long B, long C){
/*Performs Transposed Matrix- Matrix Mulitplication*/
/*
long i,j,k;
for (i=0; i<A; i++)
for (j=0; j<B; j++)
for(k=0; k<C; k++)
{
if(j==0) X2(i,k)=0;
X2(i,k) += Y1(j,i) * Z1(j,k);
}
*/
//printf("%d %d %d\n",A,B,C);
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
bool transA = true, transB = false;
int A_width = A;
int A_height = B;
int B_width = C;
int B_height = B;
//printf("%dx%d %dx%d\n", A_height, A_width, B_height, B_width);
int grid_size_x = transB? ((B_height-1)/BLOCK_SIZE + 1) : ((B_width-1)/BLOCK_SIZE + 1);
int grid_size_y = transA? ((A_width-1)/BLOCK_SIZE + 1) : ((A_height-1)/BLOCK_SIZE + 1);
dim3 dimGrid( grid_size_x, grid_size_y);
MatMulKernel<<<dimGrid,dimBlock>>>(X2,Y1,Z1,A_width,A_height,B_width,B_height, transA,transB);
}
void func(double* X1, double* Yf, long A, long B1, long val){
    /* Point-wise X1(i, j+val) = foo(Yf(i,j), val) over the A x B1 matrix Yf,
     * i.e. tanh when val != 0 and exp when val == 0 (see the foo macro);
     * X1 has B1+val columns, so the result lands shifted by `val` columns.
     * Fixes: removed the unused local `long B = B1 + val;`, and added an
     * empty-input guard — a <<<0, ...>>> launch is a configuration error. */
    long len = A * B1;
    if (len <= 0) return;
    const size_t block_size = THREADS_PER_BLOCK;
    const size_t num_blocks = (len / block_size) + ((len % block_size) ? 1 : 0);
    cuFunc<<<num_blocks, block_size>>>(X1, Yf, len, B1, val);
}
void gradient_func(double* X1, double* Yf, long A, long B){
    /* Point-wise tanh-gradient update over an A x B matrix:
     *   X1(i,j) = Yf(i, j+1) * (1 - tanh(X1(i,j))^2)
     * (Yf presumably has B+1 columns; the kernel's tid+1 + tid/n_cols
     * indexing skips Yf's leading column per row.)
     * Fixes: removed the unused local `long B1 = B + 1;`, and added an
     * empty-input guard — a <<<0, ...>>> launch is a configuration error. */
    long len = A * B;
    if (len <= 0) return;
    const size_t block_size = THREADS_PER_BLOCK;
    const size_t num_blocks = (len / block_size) + ((len % block_size) ? 1 : 0);
    cuGradientFunc<<<num_blocks, block_size>>>(X1, Yf, len, B);
}
void error(double* X1, double* Y, double* Z, long A, long B){
/*Calculates the Error*/
/*
long i,j;
for (i=0; i<A; i++)
for (j=0; j<B; j++)
X1(i,j) = Y(i,j)-Z(i,j);
*/
long len = A*B;
const size_t block_size = THREADS_PER_BLOCK;
const size_t num_blocks = (len / block_size) + ((len % block_size) ? 1 : 0);
cuMinus<<<num_blocks, block_size>>>(X1, Y, Z, len);
}
void reduction(double* Y, double* X1, long A, long B){
    /*Performs the summation of probabilities*/
    /* Row-wise sum: for each of the A rows of the device matrix Y (A x B,
       row-major), computes sum_j Y(i,j) via a multi-pass device tree
       reduction and writes the A results into device vector X1.
       Equivalent to the commented reference loop below. */
    /*long i,j;
    for (i=0; i<A; i++)
    {
        X1[i]=0;
        for (j=0; j<B; j++)
            X1[i] += Y(i,j);
    }*/
    int len = B;
    const size_t block_size = THREADS_PER_BLOCK;
    // ceil(len / block_size): blocks needed for the first reduction pass.
    const size_t num_blocks = (len / block_size) + ((len % block_size) ? 1 : 0);
    // Host-side staging buffer for the A per-row sums.
    // NOTE(review): malloc/myCudaMalloc1 results are not checked here --
    // confirm myCudaMalloc1 aborts (or reports) on allocation failure.
    double * HostX = (double * ) malloc(A * sizeof(double));
    double *data;            // device scratch: copy of the row being reduced
    double *d_partial_sums;  // device: one partial sum per block
    double *global_mem;      // device scratch handed to cu_sum
    data = (double * ) myCudaMalloc1(sizeof(double) * len);
    global_mem = (double * ) myCudaMalloc1( sizeof(double)* block_size);
    d_partial_sums = (double * ) myCudaMalloc1( sizeof(double)* num_blocks);
    for(int i = 0; i < A; ++i){
        int tmp_block_size = block_size;
        int tmp_num_blocks = num_blocks;
        int data_len = len;
        // Reduce a device-side copy of row i so Y itself is not clobbered.
        hipMemcpy(data, Y + i * len, data_len * sizeof(double), hipMemcpyDeviceToDevice);
        // Multi-pass reduction: each pass collapses data_len values into
        // tmp_num_blocks partials until a single value remains.
        while(true){
            hipMemset(global_mem, 0, sizeof(double) * tmp_block_size);
            cu_sum<<<tmp_num_blocks, tmp_block_size>>>(data, d_partial_sums, global_mem, data_len);
            hipDeviceSynchronize();
            data_len = tmp_num_blocks;
            if(tmp_num_blocks == 1){
                // copy the result back to the host
                double host_res = 0;
                hipMemcpy(&host_res, d_partial_sums, sizeof(double), hipMemcpyDeviceToHost);
                HostX[i] = host_res;
                break;
            }else if(tmp_num_blocks <= block_size){
                // Remaining partials fit in a single block: one final pass.
                // NOTE(review): tmp_block_size becomes data_len here, which
                // need not be a power of two -- cu_sum must handle
                // non-power-of-two block sizes for this to be correct; verify.
                tmp_block_size = data_len;
                tmp_num_blocks = 1;
                hipMemcpy(data, d_partial_sums, data_len * sizeof(double), hipMemcpyDeviceToDevice);
            }else{
                // Still more partials than one block can hold: full pass.
                tmp_block_size = THREADS_PER_BLOCK;
                tmp_num_blocks = (data_len / tmp_block_size) + ((data_len % tmp_block_size) ? 1 : 0);
                hipMemcpy(data, d_partial_sums, data_len * sizeof(double), hipMemcpyDeviceToDevice);
            }
        }
    }
    hipMemcpy(X1, HostX, A * sizeof(double), hipMemcpyHostToDevice); //copy back to the device
    hipFree(global_mem);
    hipFree(data);
    hipFree(d_partial_sums);
    free(HostX);
    //displayMatrix2("HostX", HostX, A, 1);
}
void prob(double* Y,double* Z, double* X1, long A, long B){
    /* Computes the normalized exponential: divides every element of the
       A x B matrix Y by its row's scale X1[i], writing Z(i,j) = Y(i,j)/X1[i]
       on the device via cuDivideByVec. */
    const size_t threads = THREADS_PER_BLOCK;
    const long total = A * B;
    // One grid element per matrix entry, rounded up to whole blocks.
    size_t blocks = total / threads;
    if (total % threads) {
        ++blocks;
    }
    cuDivideByVec<<<blocks, threads>>>(Z, Y, X1, total, B);
}
void delta(double* Z, double* Y, long A, long B, double C){
    /* Weight update for an A x B matrix, performed on the device:
       Z(i,j) -= C * Y(i,j)  (cuMinus with scale factor C). */
    const long count = A * B;
    const size_t threads = THREADS_PER_BLOCK;
    // Round the grid up so every element gets a thread.
    const size_t blocks = count / threads + (count % threads != 0 ? 1 : 0);
    cuMinus<<<blocks, threads>>>(Z, Z, Y, count, C);
}
//----------------Device kernels---------------------------------
__global__ void MatMulKernel(double* C, double* A, double* B, int A_width, int A_height, int B_width, int B_height, bool transA, bool transB)
{
    /* Tiled product C = op(A) * op(B), where op(X) is X or X^T as selected
       by transA / transB.  Each thread block computes one
       BLOCK_SIZE x BLOCK_SIZE tile of C and expects
       blockDim == (BLOCK_SIZE, BLOCK_SIZE); out-of-range tile elements are
       staged as zero, so ragged matrix edges are handled. */
    int thread_row = threadIdx.y;
    int thread_col = threadIdx.x;
    int block_row = blockIdx.y;
    int block_col = blockIdx.x;
    int Row = block_row * BLOCK_SIZE + thread_row,
        Col = block_col * BLOCK_SIZE + thread_col;
    int C_width = transB?B_height:B_width;
    int C_height = transA?A_width:A_height;
    // Inner (shared) dimension of the product.
    int K = transA ? A_height : A_width;
    // BUGFIX: inputs and output are double matrices; the original staged
    // tiles and accumulated in float, silently halving precision.
    double Cvalue = 0;
    // BUGFIX: the original bound (transA?A_height:A_width - 1)/BLOCK_SIZE+1
    // parsed as transA ? A_height : (A_width - 1), so the transposed-A path
    // ran one extra (all-zero) tile iteration.
    for (int m = 0; m < (K - 1) / BLOCK_SIZE + 1; ++m) {
        // Double tiles: 2 * BLOCK_SIZE^2 * 8 bytes, at most 16 KB since
        // BLOCK_SIZE <= 32 (1024-thread block limit).
        __shared__ double shared_A[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ double shared_B[BLOCK_SIZE][BLOCK_SIZE];
        if(transA){
            if(BLOCK_SIZE * m + thread_col < A_height && Row < A_width) {
                shared_A[thread_row][thread_col] = A[(BLOCK_SIZE * m + thread_col) * A_width + Row];
            }else{
                shared_A[thread_row][thread_col] = 0;
            }
        }else{
            if(Row < A_height && BLOCK_SIZE * m + thread_col < A_width) {
                shared_A[thread_row][thread_col] = A[Row * A_width + BLOCK_SIZE * m + thread_col];
            }else{
                shared_A[thread_row][thread_col] = 0;
            }
        }
        if(transB){
            if( Col < B_height && BLOCK_SIZE * m + thread_row < B_width) {
                shared_B[thread_row][thread_col] = B[ Col * B_width + BLOCK_SIZE * m + thread_row];
            } else {
                shared_B[thread_row][thread_col] = 0;
            }
        }else{
            if(BLOCK_SIZE * m + thread_row < B_height && Col < B_width ) {
                shared_B[thread_row][thread_col] = B[ (BLOCK_SIZE * m + thread_row) * B_width + Col];
            } else {
                shared_B[thread_row][thread_col] = 0;
            }
        }
        // Synchronize to ensure all elements are staged before use.
        __syncthreads();
#pragma unroll
        for(int e=0; e<BLOCK_SIZE; ++e)
            Cvalue += shared_A[thread_row][e] * shared_B[e][thread_col];
        // Wait before the next iteration overwrites the tiles.
        __syncthreads();
    }
    if(Row < C_height && Col < C_width) {
        C[Row * C_width + Col] = Cvalue;
    }
}
__global__ void cuMinus(double *C, double *A, double *B, int n, double delta){
    /* Element-wise C[i] = A[i] - delta * B[i]; delta == 1 takes the plain
       subtraction path.  Grid-stride loop, so any launch size covers n. */
    const int step = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += step) {
        if (delta != 1) {
            C[i] = A[i] - B[i] * delta;
        } else {
            C[i] = A[i] - B[i];
        }
    }
}
__global__ void cuDivideByVec(double *C, double *A, double *B, long n, long n_cols){
    /* Per-row division: C[i] = A[i] / B[i / n_cols], i.e. every element of
       a row-major matrix with n_cols columns is divided by that row's entry
       of vector B.  Grid-stride loop over all n elements. */
    const int step = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += step) {
        C[i] = A[i] / B[i / n_cols];
    }
}
__global__ void cuGradientFunc(double *A, double *B, long n, long n_cols){
    /* In-place tanh-gradient: A[i] = (1 - tanh(A[i])^2) * B[i+1 + i/n_cols].
       The +1-per-row offset into B skips one column per row (B presumably
       has n_cols+1 columns -- see gradient_func's reference loop). */
    const int step = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += step) {
        A[i] = (1 - pow (tanh (A[i]), 2)) * B[i+1 + (i)/n_cols];
    }
}
__global__ void cuFunc(double *A, double *B, long n, long n_cols, long val){
    /* Scatter with a column shift: for source element tid of B (row-major,
       n_cols columns, row i = tid / n_cols), writes foo(B[tid], val) into
       A at row i, column (tid % n_cols) + val, where A has n_cols + val
       columns (see the reference loop in func()):
         dst = i*(n_cols+val) + (tid % n_cols) + val = tid + val*(i + 1).
       BUGFIX: the original offset was val*(val + i) = val*i + val*val,
       which matches the intended mapping only when val is 0 or 1. */
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;
    while(tid < n){
        A[tid + val*(1 + tid/n_cols)] = foo(B[tid],val);
        tid += stride;
    }
}
__global__ void cu_sum(const double* src, double* sum, double *global_mem, const int n){
    /* Per-block sum reduction: block b writes sum[b] = sum of the up-to-
       blockDim.x elements of src it covers.
       BUGFIX (race): the original used the caller-supplied global_mem
       buffer as reduction scratch.  That buffer is a single global
       allocation shared by ALL blocks, so any launch with more than one
       block raced on it and produced nondeterministic sums.  We reduce in
       per-block __shared__ memory instead; global_mem stays in the
       signature for interface compatibility but is no longer used.
       BUGFIX (non-power-of-two): the original halving loop dropped
       elements whenever blockDim.x was not a power of two (reduction()'s
       final pass launches with arbitrary block sizes).  The pairwise loop
       below is correct for any blockDim.x. */
    (void)global_mem;
    // Max threads per block is 1024, so a fixed 8 KB tile always suffices
    // (assumes blockDim.x <= 1024, the hardware limit).
    __shared__ double smem[1024];
    unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    double x = 0;
    if(tid < n){
        x = src[tid];
    }
    smem[threadIdx.x] = x;
    __syncthreads();
    // Pairwise tree reduction valid for any block size: each step folds the
    // upper half (rounded up) onto the lower half.
    for(unsigned int active = blockDim.x; active > 1; ){
        unsigned int half = (active + 1) >> 1;
        if(threadIdx.x + half < active){
            smem[threadIdx.x] += smem[threadIdx.x + half];
        }
        __syncthreads();
        active = half;
    }
    // thread 0 writes the final result for this block
    if(threadIdx.x == 0){
        sum[blockIdx.x] = smem[0];
    }
}
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include<iostream>
#include<chrono>
__global__ void expansion(float *a, float *poly, long long int n, long long int degree, int i, int streamSize, int block){
if((i*streamSize*block)+threadIdx.x+blockIdx.x*blockDim.x < n){
float temp = a[threadIdx.x+blockIdx.x*blockDim.x];
float xpow = 1;
for (long long int i=0; i<degree; i++){
a[threadIdx.x+blockIdx.x*blockDim.x] += xpow*poly[i];
xpow *= temp;
}
}
}
int main(int argc, char* argv[]){
using namespace std::chrono;
long long int size = atoll(argv[1]);
long long int degree = atoll(argv[2]);
int block = atoi(argv[3]);
int streamCnt = atoi(argv[4]);
int streamSize = atoi(argv[5]);
long long int byten, bytepoly;
float *a, *d_a, *poly, *d_poly;
long long int numBlocks = (size+(block-1))/block;
long long int streamBlk = (numBlocks+(streamSize-1))/streamSize;
byten = streamSize*block*sizeof(float);
bytepoly = (degree+1)*sizeof(float);
cudaStream_t streams[streamCnt];
for(int i=0; i<streamCnt; i++){
cudaStreamCreate(&streams[i]);
}
cudaMallocHost((void **)&a, size*sizeof(float));
cudaMallocHost((void **)&poly, bytepoly);
//std::cout<<__LINE__<<"\t"<<cudaGetErrorString(cudaGetLastError())<<std::endl;
steady_clock::time_point start, end;
cudaMalloc((void **)&d_a, streamCnt*byten);
cudaMalloc((void **)&d_poly, bytepoly);
//std::cout<<__LINE__<<"\t"<<cudaGetErrorString(cudaGetLastError())<<std::endl;
for(long long int i=0; i<size; i++){
a[i] = 1.;
}
for(long long int i=0; i<degree+1; i++){
poly[i] = 1.;
}
start = steady_clock::now();
cudaMemcpyAsync(d_poly, poly, bytepoly, cudaMemcpyHostToDevice);
//std::cout<<__LINE__<<"\t"<<cudaGetErrorString(cudaGetLastError())<<std::endl;
for(long long int i=0; i<streamBlk; i++){
if(i*streamSize*block+streamSize*block < size){
cudaMemcpyAsync(&d_a[i%streamCnt*streamSize*block], &a[i*streamSize*block], byten, cudaMemcpyHostToDevice, streams[i%streamCnt]);
//std::cout<<i<<"\t"<<i*streamSize*block<<"\t"<<cudaGetErrorString(cudaGetLastError())<<std::endl;
expansion<<<streamSize, block, 0, streams[i%streamCnt]>>>(&d_a[i%streamCnt*streamSize*block], d_poly, size, degree, i, streamSize, block);
//std::cout<<i<<"\t"<<cudaGetErrorString(cudaGetLastError())<<std::endl;
cudaMemcpyAsync(&a[i*streamSize*block], &d_a[i%streamCnt*streamSize*block], byten, cudaMemcpyDeviceToHost, streams[i%streamCnt]);
//std::cout<<i<<"\t"<<cudaGetErrorString(cudaGetLastError())<<std::endl;
}else{
cudaMemcpyAsync(&d_a[i%streamCnt*streamSize*block], &a[i*streamSize*block], (size-i*streamSize*block)*sizeof(float), cudaMemcpyHostToDevice, streams[i%streamCnt]);
//std::cout<<i<<"\t"<<cudaGetErrorString(cudaGetLastError())<<std::endl;
expansion<<<streamSize, block, 0, streams[i%streamCnt]>>>(&d_a[i%streamCnt*streamSize*block], d_poly, size, degree, i, streamSize, block);
//std::cout<<i<<"\t"<<cudaGetErrorString(cudaGetLastError())<<std::endl;
cudaMemcpyAsync(&a[i*streamSize*block], &d_a[i%streamCnt*streamSize*block], (size-i*streamSize*block)*sizeof(float), cudaMemcpyDeviceToHost, streams[i%streamCnt]);
//std::cout<<i<<"\t"<<cudaGetErrorString(cudaGetLastError())<<std::endl;
}
}
cudaDeviceSynchronize();
std::cout<<__LINE__<<"\t"<<cudaGetErrorString(cudaGetLastError())<<std::endl;
end = steady_clock::now();
duration<double> elapsed_time = duration_cast<duration<double>>(end-start);
std::cout<<size<<"\t"<<degree<<"\t"<<(size*degree*sizeof(float))/elapsed_time.count()<<std::endl;
for(int i=0; i<streamCnt; i++){
cudaStreamDestroy(streams[i]);
}
cudaFree(d_a);
cudaFree(d_poly);
} | code for sm_80
Function : _Z9expansionPfS_xxiii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x184] ; /* 0x00006100ff027624 */
/* 0x000fc600078e00ff */
/*0030*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0040*/ IMAD R3, R3, c[0x0][0x0], R0 ; /* 0x0000000003037a24 */
/* 0x001fe400078e0200 */
/*0050*/ IMAD R0, R2, c[0x0][0x180], RZ ; /* 0x0000600002007a24 */
/* 0x000fc800078e02ff */
/*0060*/ IMAD R0, R0, c[0x0][0x188], R3 ; /* 0x0000620000007a24 */
/* 0x000fca00078e0203 */
/*0070*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fc80003f06070 */
/*0080*/ ISETP.GE.AND.EX P0, PT, RZ, c[0x0][0x174], PT, P0 ; /* 0x00005d00ff007a0c */
/* 0x000fda0003f06300 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ MOV R10, c[0x0][0x178] ; /* 0x00005e00000a7a02 */
/* 0x000fe20000000f00 */
/*00b0*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x17c] ; /* 0x00005f00ff057624 */
/* 0x000fe400078e00ff */
/*00c0*/ IMAD.MOV.U32 R2, RZ, RZ, 0x4 ; /* 0x00000004ff027424 */
/* 0x000fe200078e00ff */
/*00d0*/ ISETP.GE.U32.AND P0, PT, R10, 0x1, PT ; /* 0x000000010a00780c */
/* 0x000fc60003f06070 */
/*00e0*/ IMAD.WIDE.U32 R2, R3, R2, c[0x0][0x160] ; /* 0x0000580003027625 */
/* 0x000fe200078e0002 */
/*00f0*/ ISETP.GE.AND.EX P0, PT, R5, RZ, PT, P0 ; /* 0x000000ff0500720c */
/* 0x000fda0003f06300 */
/*0100*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0110*/ ULDC.64 UR8, c[0x0][0x118] ; /* 0x0000460000087ab9 */
/* 0x000fe40000000a00 */
/*0120*/ LDG.E R0, [R2.64] ; /* 0x0000000802007981 */
/* 0x000ea2000c1e1900 */
/*0130*/ IADD3 R4, P0, R10.reuse, -0x1, RZ ; /* 0xffffffff0a047810 */
/* 0x040fe20007f1e0ff */
/*0140*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fe20008000000 */
/*0150*/ LOP3.LUT R10, R10, 0x3, RZ, 0xc0, !PT ; /* 0x000000030a0a7812 */
/* 0x000fe200078ec0ff */
/*0160*/ UMOV UR5, URZ ; /* 0x0000003f00057c82 */
/* 0x000fe20008000000 */
/*0170*/ ISETP.GE.U32.AND P1, PT, R4, 0x3, PT ; /* 0x000000030400780c */
/* 0x000fe20003f26070 */
/*0180*/ HFMA2.MMA R7, -RZ, RZ, 1.875, 0 ; /* 0x3f800000ff077435 */
/* 0x000fe200000001ff */
/*0190*/ IADD3.X R4, R5, -0x1, RZ, P0, !PT ; /* 0xffffffff05047810 */
/* 0x000fe400007fe4ff */
/*01a0*/ ISETP.NE.U32.AND P0, PT, R10, RZ, PT ; /* 0x000000ff0a00720c */
/* 0x000fc40003f05070 */
/*01b0*/ ISETP.GE.U32.AND.EX P1, PT, R4, RZ, PT, P1 ; /* 0x000000ff0400720c */
/* 0x000fe40003f26110 */
/*01c0*/ ISETP.NE.AND.EX P0, PT, RZ, RZ, PT, P0 ; /* 0x000000ffff00720c */
/* 0x000fe20003f05300 */
/*01d0*/ IMAD.MOV.U32 R6, RZ, RZ, R0 ; /* 0x000000ffff067224 */
/* 0x004fd400078e0000 */
/*01e0*/ @!P1 BRA 0x400 ; /* 0x0000021000009947 */
/* 0x000fea0003800000 */
/*01f0*/ IADD3 R12, P1, R10, -c[0x0][0x178], RZ ; /* 0x80005e000a0c7a10 */
/* 0x000fe20007f3e0ff */
/*0200*/ IMAD.MOV.U32 R7, RZ, RZ, 0x3f800000 ; /* 0x3f800000ff077424 */
/* 0x000fe200078e00ff */
/*0210*/ MOV R4, c[0x0][0x168] ; /* 0x00005a0000047a02 */
/* 0x000fe20000000f00 */
/*0220*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff057624 */
/* 0x000fe200078e00ff */
/*0230*/ IADD3.X R14, RZ, ~c[0x0][0x17c], RZ, P1, !PT ; /* 0x80005f00ff0e7a10 */
/* 0x000fe20000ffe4ff */
/*0240*/ IMAD.MOV.U32 R6, RZ, RZ, R0 ; /* 0x000000ffff067224 */
/* 0x000fe200078e0000 */
/*0250*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fe40008000000 */
/*0260*/ UMOV UR5, URZ ; /* 0x0000003f00057c82 */
/* 0x000fe40008000000 */
/*0270*/ LDG.E R8, [R4.64] ; /* 0x0000000804087981 */
/* 0x000ea4000c1e1900 */
/*0280*/ FFMA R9, R8, R7, R6 ; /* 0x0000000708097223 */
/* 0x005fca0000000006 */
/*0290*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */
/* 0x0001e8000c101908 */
/*02a0*/ LDG.E R6, [R4.64+0x4] ; /* 0x0000040804067981 */
/* 0x000ea2000c1e1900 */
/*02b0*/ FMUL R7, R0, R7 ; /* 0x0000000700077220 */
/* 0x000fc80000400000 */
/*02c0*/ FFMA R11, R6, R7, R9 ; /* 0x00000007060b7223 */
/* 0x004fca0000000009 */
/*02d0*/ STG.E [R2.64], R11 ; /* 0x0000000b02007986 */
/* 0x0001e8000c101908 */
/*02e0*/ LDG.E R6, [R4.64+0x8] ; /* 0x0000080804067981 */
/* 0x000ea2000c1e1900 */
/*02f0*/ FMUL R7, R0, R7 ; /* 0x0000000700077220 */
/* 0x000fe20000400000 */
/*0300*/ UIADD3 UR4, UP0, UR4, 0x4, URZ ; /* 0x0000000404047890 */
/* 0x000fc8000ff1e03f */
/*0310*/ UIADD3.X UR5, URZ, UR5, URZ, UP0, !UPT ; /* 0x000000053f057290 */
/* 0x000fe400087fe43f */
/*0320*/ IADD3 R8, P2, R12, UR4, RZ ; /* 0x000000040c087c10 */
/* 0x000fe2000ff5e0ff */
/*0330*/ FFMA R13, R7, R6, R11 ; /* 0x00000006070d7223 */
/* 0x004fca000000000b */
/*0340*/ STG.E [R2.64], R13 ; /* 0x0000000d02007986 */
/* 0x0001e8000c101908 */
/*0350*/ LDG.E R6, [R4.64+0xc] ; /* 0x00000c0804067981 */
/* 0x0002a2000c1e1900 */
/*0360*/ ISETP.NE.U32.AND P1, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe20003f25070 */
/*0370*/ FMUL R7, R0, R7 ; /* 0x0000000700077220 */
/* 0x000fe20000400000 */
/*0380*/ IADD3.X R8, R14, UR5, RZ, P2, !PT ; /* 0x000000050e087c10 */
/* 0x000fc800097fe4ff */
/*0390*/ ISETP.NE.AND.EX P1, PT, R8, RZ, PT, P1 ; /* 0x000000ff0800720c */
/* 0x000fe40003f25310 */
/*03a0*/ IADD3 R4, P2, R4, 0x10, RZ ; /* 0x0000001004047810 */
/* 0x002fca0007f5e0ff */
/*03b0*/ IMAD.X R5, RZ, RZ, R5, P2 ; /* 0x000000ffff057224 */
/* 0x000fe400010e0605 */
/*03c0*/ FFMA R6, R7, R6, R13 ; /* 0x0000000607067223 */
/* 0x004fe4000000000d */
/*03d0*/ FMUL R7, R0, R7 ; /* 0x0000000700077220 */
/* 0x000fc60000400000 */
/*03e0*/ STG.E [R2.64], R6 ; /* 0x0000000602007986 */
/* 0x0001e2000c101908 */
/*03f0*/ @P1 BRA 0x270 ; /* 0xfffffe7000001947 */
/* 0x000fea000383ffff */
/*0400*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0410*/ IADD3 R8, P0, RZ, -R10, RZ ; /* 0x8000000aff087210 */
/* 0x000fe20007f1e0ff */
/*0420*/ ULDC.64 UR6, c[0x0][0x168] ; /* 0x00005a0000067ab9 */
/* 0x000fe40000000a00 */
/*0430*/ ULEA UR6, UP0, UR4, UR6, 0x2 ; /* 0x0000000604067291 */
/* 0x000fe4000f80103f */
/*0440*/ IMAD.X R9, RZ, RZ, -0x1, P0 ; /* 0xffffffffff097424 */
/* 0x001fe400000e06ff */
/*0450*/ ULEA.HI.X UR4, UR4, UR7, UR5, 0x2, UP0 ; /* 0x0000000704047291 */
/* 0x000fca00080f1405 */
/*0460*/ MOV R4, UR6 ; /* 0x0000000600047c02 */
/* 0x000fe20008000f00 */
/*0470*/ IMAD.U32 R5, RZ, RZ, UR4 ; /* 0x00000004ff057e24 */
/* 0x000fca000f8e00ff */
/*0480*/ LDG.E R4, [R4.64] ; /* 0x0000000804047981 */
/* 0x000ea2000c1e1900 */
/*0490*/ IADD3 R8, P0, R8, 0x1, RZ ; /* 0x0000000108087810 */
/* 0x000fe20007f1e0ff */
/*04a0*/ UIADD3 UR6, UP0, UR6, 0x4, URZ ; /* 0x0000000406067890 */
/* 0x000fc6000ff1e03f */
/*04b0*/ IADD3.X R9, RZ, R9, RZ, P0, !PT ; /* 0x00000009ff097210 */
/* 0x000fe200007fe4ff */
/*04c0*/ UIADD3.X UR4, URZ, UR4, URZ, UP0, !UPT ; /* 0x000000043f047290 */
/* 0x000fe200087fe43f */
/*04d0*/ ISETP.NE.U32.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fc80003f05070 */
/*04e0*/ ISETP.NE.AND.EX P0, PT, R9, RZ, PT, P0 ; /* 0x000000ff0900720c */
/* 0x000fe20003f05300 */
/*04f0*/ FFMA R6, R4, R7.reuse, R6 ; /* 0x0000000704067223 */
/* 0x085fe40000000006 */
/*0500*/ FMUL R7, R0, R7 ; /* 0x0000000700077220 */
/* 0x000fc60000400000 */
/*0510*/ STG.E [R2.64], R6 ; /* 0x0000000602007986 */
/* 0x0001ee000c101908 */
/*0520*/ @P0 BRA 0x460 ; /* 0xffffff3000000947 */
/* 0x000fea000383ffff */
/*0530*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0540*/ BRA 0x540; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0550*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0560*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0580*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0590*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include<iostream>
#include<chrono>
__global__ void expansion(float *a, float *poly, long long int n, long long int degree, int i, int streamSize, int block){
if((i*streamSize*block)+threadIdx.x+blockIdx.x*blockDim.x < n){
float temp = a[threadIdx.x+blockIdx.x*blockDim.x];
float xpow = 1;
for (long long int i=0; i<degree; i++){
a[threadIdx.x+blockIdx.x*blockDim.x] += xpow*poly[i];
xpow *= temp;
}
}
}
int main(int argc, char* argv[]){
using namespace std::chrono;
long long int size = atoll(argv[1]);
long long int degree = atoll(argv[2]);
int block = atoi(argv[3]);
int streamCnt = atoi(argv[4]);
int streamSize = atoi(argv[5]);
long long int byten, bytepoly;
float *a, *d_a, *poly, *d_poly;
long long int numBlocks = (size+(block-1))/block;
long long int streamBlk = (numBlocks+(streamSize-1))/streamSize;
byten = streamSize*block*sizeof(float);
bytepoly = (degree+1)*sizeof(float);
cudaStream_t streams[streamCnt];
for(int i=0; i<streamCnt; i++){
cudaStreamCreate(&streams[i]);
}
cudaMallocHost((void **)&a, size*sizeof(float));
cudaMallocHost((void **)&poly, bytepoly);
//std::cout<<__LINE__<<"\t"<<cudaGetErrorString(cudaGetLastError())<<std::endl;
steady_clock::time_point start, end;
cudaMalloc((void **)&d_a, streamCnt*byten);
cudaMalloc((void **)&d_poly, bytepoly);
//std::cout<<__LINE__<<"\t"<<cudaGetErrorString(cudaGetLastError())<<std::endl;
for(long long int i=0; i<size; i++){
a[i] = 1.;
}
for(long long int i=0; i<degree+1; i++){
poly[i] = 1.;
}
start = steady_clock::now();
cudaMemcpyAsync(d_poly, poly, bytepoly, cudaMemcpyHostToDevice);
//std::cout<<__LINE__<<"\t"<<cudaGetErrorString(cudaGetLastError())<<std::endl;
for(long long int i=0; i<streamBlk; i++){
if(i*streamSize*block+streamSize*block < size){
cudaMemcpyAsync(&d_a[i%streamCnt*streamSize*block], &a[i*streamSize*block], byten, cudaMemcpyHostToDevice, streams[i%streamCnt]);
//std::cout<<i<<"\t"<<i*streamSize*block<<"\t"<<cudaGetErrorString(cudaGetLastError())<<std::endl;
expansion<<<streamSize, block, 0, streams[i%streamCnt]>>>(&d_a[i%streamCnt*streamSize*block], d_poly, size, degree, i, streamSize, block);
//std::cout<<i<<"\t"<<cudaGetErrorString(cudaGetLastError())<<std::endl;
cudaMemcpyAsync(&a[i*streamSize*block], &d_a[i%streamCnt*streamSize*block], byten, cudaMemcpyDeviceToHost, streams[i%streamCnt]);
//std::cout<<i<<"\t"<<cudaGetErrorString(cudaGetLastError())<<std::endl;
}else{
cudaMemcpyAsync(&d_a[i%streamCnt*streamSize*block], &a[i*streamSize*block], (size-i*streamSize*block)*sizeof(float), cudaMemcpyHostToDevice, streams[i%streamCnt]);
//std::cout<<i<<"\t"<<cudaGetErrorString(cudaGetLastError())<<std::endl;
expansion<<<streamSize, block, 0, streams[i%streamCnt]>>>(&d_a[i%streamCnt*streamSize*block], d_poly, size, degree, i, streamSize, block);
//std::cout<<i<<"\t"<<cudaGetErrorString(cudaGetLastError())<<std::endl;
cudaMemcpyAsync(&a[i*streamSize*block], &d_a[i%streamCnt*streamSize*block], (size-i*streamSize*block)*sizeof(float), cudaMemcpyDeviceToHost, streams[i%streamCnt]);
//std::cout<<i<<"\t"<<cudaGetErrorString(cudaGetLastError())<<std::endl;
}
}
cudaDeviceSynchronize();
std::cout<<__LINE__<<"\t"<<cudaGetErrorString(cudaGetLastError())<<std::endl;
end = steady_clock::now();
duration<double> elapsed_time = duration_cast<duration<double>>(end-start);
std::cout<<size<<"\t"<<degree<<"\t"<<(size*degree*sizeof(float))/elapsed_time.count()<<std::endl;
for(int i=0; i<streamCnt; i++){
cudaStreamDestroy(streams[i]);
}
cudaFree(d_a);
cudaFree(d_poly);
} | .file "tmpxft_0009e172_00000000-6_polyExp.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3778:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3778:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z35__device_stub__Z9expansionPfS_xxiiiPfS_xxiii
.type _Z35__device_stub__Z9expansionPfS_xxiiiPfS_xxiii, @function
_Z35__device_stub__Z9expansionPfS_xxiiiPfS_xxiii:
.LFB3800:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
leaq 192(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z9expansionPfS_xxiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3800:
.size _Z35__device_stub__Z9expansionPfS_xxiiiPfS_xxiii, .-_Z35__device_stub__Z9expansionPfS_xxiiiPfS_xxiii
.globl _Z9expansionPfS_xxiii
.type _Z9expansionPfS_xxiii, @function
_Z9expansionPfS_xxiii:
.LFB3801:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z35__device_stub__Z9expansionPfS_xxiiiPfS_xxiii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3801:
.size _Z9expansionPfS_xxiii, .-_Z9expansionPfS_xxiii
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "\t"
.text
.globl main
.type main, @function
main:
.LFB3768:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
movq %rsp, %rbp
.cfi_def_cfa_register 6
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $216, %rsp
.cfi_offset 15, -24
.cfi_offset 14, -32
.cfi_offset 13, -40
.cfi_offset 12, -48
.cfi_offset 3, -56
movq %rsi, %rbx
movq %fs:40, %rax
movq %rax, -56(%rbp)
xorl %eax, %eax
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtoll@PLT
movq %rax, %r13
movq %rax, -120(%rbp)
movq 16(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtoll@PLT
movq %rax, %r12
movq %rax, -216(%rbp)
movq 24(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r15
movq %rax, -184(%rbp)
movl %eax, -232(%rbp)
movq 32(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r14
movq 40(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %rcx
movq %rax, -176(%rbp)
movl %eax, -228(%rbp)
movslq %r15d, %rbx
movq %rbx, -152(%rbp)
movslq %eax, %rdi
movq %rdi, -160(%rbp)
movl %r15d, %eax
subl $1, %eax
cltq
addq %r13, %rax
cqto
idivq %rbx
leal -1(%rcx), %edx
movslq %edx, %rdx
addq %rdx, %rax
cqto
idivq %rdi
movq %rax, -144(%rbp)
movl %r15d, %eax
imull %ecx, %eax
movslq %eax, %r15
leaq 0(,%r15,4), %rax
movq %rax, -128(%rbp)
leaq 4(,%r12,4), %r13
movslq %r14d, %rax
movq %rax, -136(%rbp)
leaq 15(,%rax,8), %rax
movq %rax, %rcx
andq $-16, %rcx
andq $-4096, %rax
movq %rsp, %rdx
subq %rax, %rdx
.L12:
cmpq %rdx, %rsp
je .L13
subq $4096, %rsp
orq $0, 4088(%rsp)
jmp .L12
.L13:
movq %rcx, %rax
andl $4095, %eax
subq %rax, %rsp
testq %rax, %rax
je .L14
orq $0, -8(%rsp,%rax)
.L14:
movq %rsp, %rcx
movq %rcx, -168(%rbp)
testl %r14d, %r14d
jle .L15
movq %rcx, %rbx
leal -1(%r14), %eax
leaq 8(%rcx,%rax,8), %r12
.L16:
movq %rbx, %rdi
call cudaStreamCreate@PLT
addq $8, %rbx
cmpq %r12, %rbx
jne .L16
.L15:
movq -120(%rbp), %rbx
leaq 0(,%rbx,4), %rsi
leaq -112(%rbp), %rdi
call cudaMallocHost@PLT
leaq -96(%rbp), %rdi
movq %r13, %rsi
call cudaMallocHost@PLT
movq -136(%rbp), %rsi
movq -128(%rbp), %rax
imulq %rax, %rsi
leaq -104(%rbp), %rdi
call cudaMalloc@PLT
leaq -88(%rbp), %rdi
movq %r13, %rsi
call cudaMalloc@PLT
movq %rbx, %rcx
testq %rbx, %rbx
jle .L17
movl $0, %eax
movss .LC0(%rip), %xmm0
.L18:
movq -112(%rbp), %rdx
movss %xmm0, (%rdx,%rax,4)
addq $1, %rax
cmpq %rcx, %rax
jne .L18
.L17:
movq -216(%rbp), %rax
testq %rax, %rax
js .L19
leaq 1(%rax), %rcx
movl $0, %eax
movss .LC0(%rip), %xmm0
.L20:
movq -96(%rbp), %rdx
movss %xmm0, (%rdx,%rax,4)
addq $1, %rax
cmpq %rcx, %rax
jne .L20
.L19:
call _ZNSt6chrono3_V212steady_clock3nowEv@PLT
movq %rax, -240(%rbp)
movl $0, %r8d
movl $1, %ecx
movq %r13, %rdx
movq -96(%rbp), %rsi
movq -88(%rbp), %rdi
call cudaMemcpyAsync@PLT
cmpq $0, -144(%rbp)
jle .L21
movq -152(%rbp), %rax
movq -160(%rbp), %rcx
imulq %rcx, %rax
movq %rax, -192(%rbp)
movq -120(%rbp), %rax
addq %r15, %rax
salq $2, %rax
movq %rax, -224(%rbp)
movq %r15, %rax
negq %rax
salq $2, %rax
movq %rax, -200(%rbp)
movl $0, %r12d
movq %r14, -248(%rbp)
jmp .L26
.L23:
addq -104(%rbp), %rbx
movq %rbx, %rsi
movq %r13, %rdi
addq -112(%rbp), %rdi
movq (%r14), %r8
movl $2, %ecx
movq -128(%rbp), %rdx
call cudaMemcpyAsync@PLT
.L24:
addq $1, %r12
movq -192(%rbp), %rax
addq %rax, %r15
cmpq %r12, -144(%rbp)
je .L38
.L26:
cmpq %r15, -120(%rbp)
jle .L22
movq %r12, %rax
cqto
idivq -136(%rbp)
movq -168(%rbp), %rax
leaq (%rax,%rdx,8), %r14
movq -200(%rbp), %rax
leaq (%rax,%r15,4), %r13
movq -160(%rbp), %rax
imulq %rax, %rdx
movq -152(%rbp), %rax
imulq %rax, %rdx
leaq 0(,%rdx,4), %rbx
movq %r13, %rsi
addq -112(%rbp), %rsi
movq %rbx, %rdi
addq -104(%rbp), %rdi
movq (%r14), %r8
movl $1, %ecx
movq -128(%rbp), %rdx
call cudaMemcpyAsync@PLT
movl -184(%rbp), %eax
movl %eax, -68(%rbp)
movl $1, -64(%rbp)
movl $1, -60(%rbp)
movl -176(%rbp), %eax
movl %eax, -80(%rbp)
movl $1, -76(%rbp)
movl $1, -72(%rbp)
movq (%r14), %r9
movl $0, %r8d
movq -68(%rbp), %rdx
movl $1, %ecx
movq -80(%rbp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L23
movq %rbx, %rdi
addq -104(%rbp), %rdi
subq $8, %rsp
movl -232(%rbp), %eax
pushq %rax
movl -228(%rbp), %r9d
movl %r12d, %r8d
movq -216(%rbp), %rcx
movq -120(%rbp), %rdx
movq -88(%rbp), %rsi
call _Z35__device_stub__Z9expansionPfS_xxiiiPfS_xxiii
addq $16, %rsp
jmp .L23
.L22:
movq %r12, %rax
cqto
idivq -136(%rbp)
movq -168(%rbp), %rax
leaq (%rax,%rdx,8), %r14
movq %r15, %rax
negq %rax
movq -224(%rbp), %rcx
leaq (%rcx,%rax,4), %rax
movq %rax, -208(%rbp)
movq -200(%rbp), %rcx
leaq (%rcx,%r15,4), %r13
movq -160(%rbp), %rcx
imulq %rcx, %rdx
movq -152(%rbp), %rcx
imulq %rcx, %rdx
leaq 0(,%rdx,4), %rbx
movq %r13, %rsi
addq -112(%rbp), %rsi
movq %rbx, %rdi
addq -104(%rbp), %rdi
movq (%r14), %r8
movl $1, %ecx
movq %rax, %rdx
call cudaMemcpyAsync@PLT
movl -184(%rbp), %eax
movl %eax, -68(%rbp)
movl $1, -64(%rbp)
movl $1, -60(%rbp)
movl -176(%rbp), %eax
movl %eax, -80(%rbp)
movl $1, -76(%rbp)
movl $1, -72(%rbp)
movq (%r14), %r9
movl $0, %r8d
movq -68(%rbp), %rdx
movl $1, %ecx
movq -80(%rbp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L39
.L25:
addq -104(%rbp), %rbx
movq %rbx, %rsi
movq %r13, %rdi
addq -112(%rbp), %rdi
movq (%r14), %r8
movl $2, %ecx
movq -208(%rbp), %rdx
call cudaMemcpyAsync@PLT
jmp .L24
.L39:
movq %rbx, %rdi
addq -104(%rbp), %rdi
subq $8, %rsp
movl -232(%rbp), %eax
pushq %rax
movl -228(%rbp), %r9d
movl %r12d, %r8d
movq -216(%rbp), %rcx
movq -120(%rbp), %rdx
movq -88(%rbp), %rsi
call _Z35__device_stub__Z9expansionPfS_xxiiiPfS_xxiii
addq $16, %rsp
jmp .L25
.L38:
movq -248(%rbp), %r14
.L21:
call cudaDeviceSynchronize@PLT
movl $70, %esi
leaq _ZSt4cout(%rip), %r13
movq %r13, %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC1(%rip), %rbx
movq %rbx, %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %r12
call cudaGetLastError@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rsi
movq %r12, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
call _ZNSt6chrono3_V212steady_clock3nowEv@PLT
movq -240(%rbp), %rsi
subq %rsi, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
divsd .LC2(%rip), %xmm0
movsd %xmm0, -128(%rbp)
movq -120(%rbp), %r12
movq %r12, %rsi
movq %r13, %rdi
call _ZNSo9_M_insertIxEERSoT_@PLT
movq %rax, %rdi
movq %rbx, %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movq -216(%rbp), %r15
movq %r15, %rsi
call _ZNSo9_M_insertIxEERSoT_@PLT
movq %rax, %rdi
movq %rbx, %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movq %r15, %rdx
imulq %r12, %rdx
salq $2, %rdx
js .L27
pxor %xmm0, %xmm0
cvtsi2sdq %rdx, %xmm0
.L28:
divsd -128(%rbp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
testl %r14d, %r14d
jle .L29
movq -168(%rbp), %rsi
movq %rsi, %rbx
leal -1(%r14), %eax
leaq 8(%rsi,%rax,8), %r12
.L30:
movq (%rbx), %rdi
call cudaStreamDestroy@PLT
addq $8, %rbx
cmpq %r12, %rbx
jne .L30
.L29:
movq -104(%rbp), %rdi
call cudaFree@PLT
movq -88(%rbp), %rdi
call cudaFree@PLT
movq -56(%rbp), %rax
subq %fs:40, %rax
jne .L40
movl $0, %eax
leaq -40(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
.cfi_remember_state
.cfi_def_cfa 7, 8
ret
.L27:
.cfi_restore_state
shrq %rdx
pxor %xmm0, %xmm0
cvtsi2sdq %rdx, %xmm0
addsd %xmm0, %xmm0
jmp .L28
.L40:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3768:
.size main, .-main
.section .rodata.str1.1
.LC3:
.string "_Z9expansionPfS_xxiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3803:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z9expansionPfS_xxiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3803:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1065353216
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC2:
.long 0
.long 1104006501
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include<iostream>
#include<chrono>
__global__ void expansion(float *a, float *poly, long long int n, long long int degree, int i, int streamSize, int block){
if((i*streamSize*block)+threadIdx.x+blockIdx.x*blockDim.x < n){
float temp = a[threadIdx.x+blockIdx.x*blockDim.x];
float xpow = 1;
for (long long int i=0; i<degree; i++){
a[threadIdx.x+blockIdx.x*blockDim.x] += xpow*poly[i];
xpow *= temp;
}
}
}
int main(int argc, char* argv[]){
using namespace std::chrono;
long long int size = atoll(argv[1]);
long long int degree = atoll(argv[2]);
int block = atoi(argv[3]);
int streamCnt = atoi(argv[4]);
int streamSize = atoi(argv[5]);
long long int byten, bytepoly;
float *a, *d_a, *poly, *d_poly;
long long int numBlocks = (size+(block-1))/block;
long long int streamBlk = (numBlocks+(streamSize-1))/streamSize;
byten = streamSize*block*sizeof(float);
bytepoly = (degree+1)*sizeof(float);
cudaStream_t streams[streamCnt];
for(int i=0; i<streamCnt; i++){
cudaStreamCreate(&streams[i]);
}
cudaMallocHost((void **)&a, size*sizeof(float));
cudaMallocHost((void **)&poly, bytepoly);
//std::cout<<__LINE__<<"\t"<<cudaGetErrorString(cudaGetLastError())<<std::endl;
steady_clock::time_point start, end;
cudaMalloc((void **)&d_a, streamCnt*byten);
cudaMalloc((void **)&d_poly, bytepoly);
//std::cout<<__LINE__<<"\t"<<cudaGetErrorString(cudaGetLastError())<<std::endl;
for(long long int i=0; i<size; i++){
a[i] = 1.;
}
for(long long int i=0; i<degree+1; i++){
poly[i] = 1.;
}
start = steady_clock::now();
cudaMemcpyAsync(d_poly, poly, bytepoly, cudaMemcpyHostToDevice);
//std::cout<<__LINE__<<"\t"<<cudaGetErrorString(cudaGetLastError())<<std::endl;
for(long long int i=0; i<streamBlk; i++){
if(i*streamSize*block+streamSize*block < size){
cudaMemcpyAsync(&d_a[i%streamCnt*streamSize*block], &a[i*streamSize*block], byten, cudaMemcpyHostToDevice, streams[i%streamCnt]);
//std::cout<<i<<"\t"<<i*streamSize*block<<"\t"<<cudaGetErrorString(cudaGetLastError())<<std::endl;
expansion<<<streamSize, block, 0, streams[i%streamCnt]>>>(&d_a[i%streamCnt*streamSize*block], d_poly, size, degree, i, streamSize, block);
//std::cout<<i<<"\t"<<cudaGetErrorString(cudaGetLastError())<<std::endl;
cudaMemcpyAsync(&a[i*streamSize*block], &d_a[i%streamCnt*streamSize*block], byten, cudaMemcpyDeviceToHost, streams[i%streamCnt]);
//std::cout<<i<<"\t"<<cudaGetErrorString(cudaGetLastError())<<std::endl;
}else{
cudaMemcpyAsync(&d_a[i%streamCnt*streamSize*block], &a[i*streamSize*block], (size-i*streamSize*block)*sizeof(float), cudaMemcpyHostToDevice, streams[i%streamCnt]);
//std::cout<<i<<"\t"<<cudaGetErrorString(cudaGetLastError())<<std::endl;
expansion<<<streamSize, block, 0, streams[i%streamCnt]>>>(&d_a[i%streamCnt*streamSize*block], d_poly, size, degree, i, streamSize, block);
//std::cout<<i<<"\t"<<cudaGetErrorString(cudaGetLastError())<<std::endl;
cudaMemcpyAsync(&a[i*streamSize*block], &d_a[i%streamCnt*streamSize*block], (size-i*streamSize*block)*sizeof(float), cudaMemcpyDeviceToHost, streams[i%streamCnt]);
//std::cout<<i<<"\t"<<cudaGetErrorString(cudaGetLastError())<<std::endl;
}
}
cudaDeviceSynchronize();
std::cout<<__LINE__<<"\t"<<cudaGetErrorString(cudaGetLastError())<<std::endl;
end = steady_clock::now();
duration<double> elapsed_time = duration_cast<duration<double>>(end-start);
std::cout<<size<<"\t"<<degree<<"\t"<<(size*degree*sizeof(float))/elapsed_time.count()<<std::endl;
for(int i=0; i<streamCnt; i++){
cudaStreamDestroy(streams[i]);
}
cudaFree(d_a);
cudaFree(d_poly);
} | #include <hip/hip_runtime.h>
#include<iostream>
#include<chrono>
__global__ void expansion(float *a, float *poly, long long int n, long long int degree){
if(threadIdx.x+blockIdx.x*blockDim.x < n){
float temp = a[threadIdx.x+blockIdx.x*blockDim.x];
float xpow = 1;
for (int i=0; i<degree; i++){
a[threadIdx.x+blockIdx.x*blockDim.x] += xpow*poly[i];
xpow *= temp;
}
}
}
int main(int argc, char* argv[]){
using namespace std::chrono;
long long int size = atoi(argv[1]);
long long int degree = atoi(argv[2]);
int block = atoi(argv[3]);
long long int bytepoly, byten;
bytepoly = (degree+1)*sizeof(float);
byten = size*sizeof(float);
float *a, *d_a, *poly, *d_poly;
long long int *d_degree;
a = (float *)malloc(byten);
poly = (float *)malloc(bytepoly);
steady_clock::time_point start, end;
hipMalloc((void **)&d_a, byten);
hipMalloc((void **)&d_poly, bytepoly);
hipMalloc((void **)&d_degree, sizeof(int));
for(long long int i=0; i<size; i++){
a[i] = 1.;
}
for(long long int i=0; i<degree+1; i++){
poly[i] = 2.;
}
start = steady_clock::now();
hipMemcpy(d_a, a, byten, hipMemcpyHostToDevice);
hipDeviceSynchronize();
end = steady_clock::now();
hipMemcpy(d_poly, poly, bytepoly, hipMemcpyHostToDevice);
expansion<<<((size+(block-1))/block),block>>>(d_a, d_poly, size, degree);
printf("%s\n",hipGetErrorString(hipGetLastError()));
hipMemcpy(a, d_a, byten, hipMemcpyDeviceToHost);
duration<double> elapsed_time = duration_cast<duration<double>>(end-start);
printf("%s\n",hipGetErrorString(hipGetLastError()));
std::cout<<size<<"\t"<<degree<<"\t"<<size/elapsed_time.count()<<"\n";
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include<iostream>
#include<chrono>
__global__ void expansion(float *a, float *poly, long long int n, long long int degree){
if(threadIdx.x+blockIdx.x*blockDim.x < n){
float temp = a[threadIdx.x+blockIdx.x*blockDim.x];
float xpow = 1;
for (int i=0; i<degree; i++){
a[threadIdx.x+blockIdx.x*blockDim.x] += xpow*poly[i];
xpow *= temp;
}
}
}
int main(int argc, char* argv[]){
using namespace std::chrono;
long long int size = atoi(argv[1]);
long long int degree = atoi(argv[2]);
int block = atoi(argv[3]);
long long int bytepoly, byten;
bytepoly = (degree+1)*sizeof(float);
byten = size*sizeof(float);
float *a, *d_a, *poly, *d_poly;
long long int *d_degree;
a = (float *)malloc(byten);
poly = (float *)malloc(bytepoly);
steady_clock::time_point start, end;
hipMalloc((void **)&d_a, byten);
hipMalloc((void **)&d_poly, bytepoly);
hipMalloc((void **)&d_degree, sizeof(int));
for(long long int i=0; i<size; i++){
a[i] = 1.;
}
for(long long int i=0; i<degree+1; i++){
poly[i] = 2.;
}
start = steady_clock::now();
hipMemcpy(d_a, a, byten, hipMemcpyHostToDevice);
hipDeviceSynchronize();
end = steady_clock::now();
hipMemcpy(d_poly, poly, bytepoly, hipMemcpyHostToDevice);
expansion<<<((size+(block-1))/block),block>>>(d_a, d_poly, size, degree);
printf("%s\n",hipGetErrorString(hipGetLastError()));
hipMemcpy(a, d_a, byten, hipMemcpyDeviceToHost);
duration<double> elapsed_time = duration_cast<duration<double>>(end-start);
printf("%s\n",hipGetErrorString(hipGetLastError()));
std::cout<<size<<"\t"<<degree<<"\t"<<size/elapsed_time.count()<<"\n";
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9expansionPfS_xx
.globl _Z9expansionPfS_xx
.p2align 8
.type _Z9expansionPfS_xx,@function
_Z9expansionPfS_xx:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x2c
s_load_b64 s[2:3], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
v_mov_b32_e32 v2, 0
v_cmp_gt_i64_e32 vcc_lo, s[2:3], v[1:2]
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_4
s_load_b64 s[4:5], s[0:1], 0x18
s_waitcnt lgkmcnt(0)
v_cmp_lt_i64_e64 s2, s[4:5], 1
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 vcc_lo, exec_lo, s2
s_cbranch_vccnz .LBB0_4
s_load_b128 s[0:3], s[0:1], 0x0
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_dual_mov_b32 v3, 1.0 :: v_dual_mov_b32 v4, 0
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_mov_b64 s[0:1], 0
global_load_b32 v2, v[0:1], off
s_waitcnt vmcnt(0)
v_mov_b32_e32 v5, v2
.LBB0_3:
global_load_b32 v6, v4, s[2:3]
s_add_u32 s0, s0, 1
s_addc_u32 s1, s1, 0
s_add_u32 s2, s2, 4
v_cmp_lt_i64_e64 s6, s[0:1], s[4:5]
s_addc_u32 s3, s3, 0
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 vcc_lo, exec_lo, s6
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v5, v3, v6
v_mul_f32_e32 v3, v2, v3
global_store_b32 v[0:1], v5, off
s_cbranch_vccnz .LBB0_3
.LBB0_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9expansionPfS_xx
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9expansionPfS_xx, .Lfunc_end0-_Z9expansionPfS_xx
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 8
.value_kind: by_value
- .offset: 24
.size: 8
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9expansionPfS_xx
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9expansionPfS_xx.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include<iostream>
#include<chrono>
__global__ void expansion(float *a, float *poly, long long int n, long long int degree){
if(threadIdx.x+blockIdx.x*blockDim.x < n){
float temp = a[threadIdx.x+blockIdx.x*blockDim.x];
float xpow = 1;
for (int i=0; i<degree; i++){
a[threadIdx.x+blockIdx.x*blockDim.x] += xpow*poly[i];
xpow *= temp;
}
}
}
int main(int argc, char* argv[]){
using namespace std::chrono;
long long int size = atoi(argv[1]);
long long int degree = atoi(argv[2]);
int block = atoi(argv[3]);
long long int bytepoly, byten;
bytepoly = (degree+1)*sizeof(float);
byten = size*sizeof(float);
float *a, *d_a, *poly, *d_poly;
long long int *d_degree;
a = (float *)malloc(byten);
poly = (float *)malloc(bytepoly);
steady_clock::time_point start, end;
hipMalloc((void **)&d_a, byten);
hipMalloc((void **)&d_poly, bytepoly);
hipMalloc((void **)&d_degree, sizeof(int));
for(long long int i=0; i<size; i++){
a[i] = 1.;
}
for(long long int i=0; i<degree+1; i++){
poly[i] = 2.;
}
start = steady_clock::now();
hipMemcpy(d_a, a, byten, hipMemcpyHostToDevice);
hipDeviceSynchronize();
end = steady_clock::now();
hipMemcpy(d_poly, poly, bytepoly, hipMemcpyHostToDevice);
expansion<<<((size+(block-1))/block),block>>>(d_a, d_poly, size, degree);
printf("%s\n",hipGetErrorString(hipGetLastError()));
hipMemcpy(a, d_a, byten, hipMemcpyDeviceToHost);
duration<double> elapsed_time = duration_cast<duration<double>>(end-start);
printf("%s\n",hipGetErrorString(hipGetLastError()));
std::cout<<size<<"\t"<<degree<<"\t"<<size/elapsed_time.count()<<"\n";
} | .text
.file "polyExp.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z24__device_stub__expansionPfS_xx # -- Begin function _Z24__device_stub__expansionPfS_xx
.p2align 4, 0x90
.type _Z24__device_stub__expansionPfS_xx,@function
_Z24__device_stub__expansionPfS_xx: # @_Z24__device_stub__expansionPfS_xx
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movq %rcx, 48(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rax
movq %rax, 104(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9expansionPfS_xx, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z24__device_stub__expansionPfS_xx, .Lfunc_end0-_Z24__device_stub__expansionPfS_xx
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI1_0:
.quad 0x41cdcd6500000000 # double 1.0E+9
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $184, %rsp
.cfi_def_cfa_offset 240
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
movq 8(%rsi), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, 56(%rsp) # 8-byte Spill
movslq %eax, %r15
movq 16(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
cltq
movq 24(%rbx), %rdi
movq %rax, %rbx
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, 40(%rsp) # 8-byte Spill
leaq 4(,%rbx,4), %r14
leaq (,%r15,4), %rbp
movq %rbp, %rdi
callq malloc
movq %rax, %r13
movq %r14, %rdi
callq malloc
movq %rax, %r12
leaq 16(%rsp), %rdi
movq %rbp, 8(%rsp) # 8-byte Spill
movq %rbp, %rsi
callq hipMalloc
leaq 32(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
leaq 176(%rsp), %rdi
movl $4, %esi
callq hipMalloc
testq %r15, %r15
jle .LBB1_3
# %bb.1: # %.lr.ph.preheader
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl $1065353216, (%r13,%rax,4) # imm = 0x3F800000
incq %rax
cmpq %rax, %r15
jne .LBB1_2
.LBB1_3: # %.preheader
testq %rbx, %rbx
js .LBB1_6
# %bb.4: # %.lr.ph55.preheader
leaq 1(%rbx), %rax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB1_5: # %.lr.ph55
# =>This Inner Loop Header: Depth=1
movl $1073741824, (%r12,%rcx,4) # imm = 0x40000000
incq %rcx
cmpq %rcx, %rax
jne .LBB1_5
.LBB1_6: # %._crit_edge
movq %rbx, 24(%rsp) # 8-byte Spill
callq _ZNSt6chrono3_V212steady_clock3nowEv
movq %rax, 48(%rsp) # 8-byte Spill
movq 16(%rsp), %rdi
movq %r13, %rsi
movq 8(%rsp), %rbp # 8-byte Reload
movq %rbp, %rdx
movl $1, %ecx
callq hipMemcpy
callq hipDeviceSynchronize
callq _ZNSt6chrono3_V212steady_clock3nowEv
movq %rax, %rbx
movq 32(%rsp), %rdi
movq %r12, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movq 40(%rsp), %rcx # 8-byte Reload
leal -1(%rcx), %eax
cltq
addq %r15, %rax
movslq %ecx, %rcx
cqto
idivq %rcx
movl %eax, %edi
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %rdi
movl %ecx, %edx
orq %rax, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_8
# %bb.7:
movq 16(%rsp), %rax
movq 32(%rsp), %rcx
movq %rax, 136(%rsp)
movq %rcx, 128(%rsp)
movq %r15, 120(%rsp)
movq 24(%rsp), %rax # 8-byte Reload
movq %rax, 112(%rsp)
leaq 136(%rsp), %rax
movq %rax, 144(%rsp)
leaq 128(%rsp), %rax
movq %rax, 152(%rsp)
leaq 120(%rsp), %rax
movq %rax, 160(%rsp)
leaq 112(%rsp), %rax
movq %rax, 168(%rsp)
leaq 96(%rsp), %rdi
leaq 80(%rsp), %rsi
leaq 72(%rsp), %rdx
leaq 64(%rsp), %rcx
callq __hipPopCallConfiguration
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
movq 80(%rsp), %rcx
movl 88(%rsp), %r8d
leaq 144(%rsp), %r9
movl $_Z9expansionPfS_xx, %edi
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_8:
callq hipGetLastError
movl %eax, %edi
callq hipGetErrorString
movq %rax, %rdi
callq puts@PLT
movq 16(%rsp), %rsi
movq %r13, %rdi
movq %rbp, %rdx
movl $2, %ecx
callq hipMemcpy
subq 48(%rsp), %rbx # 8-byte Folded Reload
cvtsi2sd %rbx, %xmm0
divsd .LCPI1_0(%rip), %xmm0
movsd %xmm0, 8(%rsp) # 8-byte Spill
callq hipGetLastError
movl %eax, %edi
callq hipGetErrorString
movq %rax, %rdi
callq puts@PLT
movl $_ZSt4cout, %edi
movq %r15, %rsi
callq _ZNSo9_M_insertIxEERSoT_
movq %rax, %rbx
movl $.L.str.1, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %rbx, %rdi
movq 24(%rsp), %rsi # 8-byte Reload
callq _ZNSo9_M_insertIxEERSoT_
movq %rax, %rbx
movl $.L.str.1, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
xorps %xmm0, %xmm0
cvtsi2sdl 56(%rsp), %xmm0 # 4-byte Folded Reload
divsd 8(%rsp), %xmm0 # 8-byte Folded Reload
movq %rbx, %rdi
callq _ZNSo9_M_insertIdEERSoT_
movl $.L.str.2, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
xorl %eax, %eax
addq $184, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9expansionPfS_xx, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9expansionPfS_xx,@object # @_Z9expansionPfS_xx
.section .rodata,"a",@progbits
.globl _Z9expansionPfS_xx
.p2align 3, 0x0
_Z9expansionPfS_xx:
.quad _Z24__device_stub__expansionPfS_xx
.size _Z9expansionPfS_xx, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "\t"
.size .L.str.1, 2
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "\n"
.size .L.str.2, 2
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z9expansionPfS_xx"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__expansionPfS_xx
.addrsig_sym __gxx_personality_v0
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9expansionPfS_xx
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z9expansionPfS_xxiii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x184] ; /* 0x00006100ff027624 */
/* 0x000fc600078e00ff */
/*0030*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0040*/ IMAD R3, R3, c[0x0][0x0], R0 ; /* 0x0000000003037a24 */
/* 0x001fe400078e0200 */
/*0050*/ IMAD R0, R2, c[0x0][0x180], RZ ; /* 0x0000600002007a24 */
/* 0x000fc800078e02ff */
/*0060*/ IMAD R0, R0, c[0x0][0x188], R3 ; /* 0x0000620000007a24 */
/* 0x000fca00078e0203 */
/*0070*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fc80003f06070 */
/*0080*/ ISETP.GE.AND.EX P0, PT, RZ, c[0x0][0x174], PT, P0 ; /* 0x00005d00ff007a0c */
/* 0x000fda0003f06300 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ MOV R10, c[0x0][0x178] ; /* 0x00005e00000a7a02 */
/* 0x000fe20000000f00 */
/*00b0*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x17c] ; /* 0x00005f00ff057624 */
/* 0x000fe400078e00ff */
/*00c0*/ IMAD.MOV.U32 R2, RZ, RZ, 0x4 ; /* 0x00000004ff027424 */
/* 0x000fe200078e00ff */
/*00d0*/ ISETP.GE.U32.AND P0, PT, R10, 0x1, PT ; /* 0x000000010a00780c */
/* 0x000fc60003f06070 */
/*00e0*/ IMAD.WIDE.U32 R2, R3, R2, c[0x0][0x160] ; /* 0x0000580003027625 */
/* 0x000fe200078e0002 */
/*00f0*/ ISETP.GE.AND.EX P0, PT, R5, RZ, PT, P0 ; /* 0x000000ff0500720c */
/* 0x000fda0003f06300 */
/*0100*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0110*/ ULDC.64 UR8, c[0x0][0x118] ; /* 0x0000460000087ab9 */
/* 0x000fe40000000a00 */
/*0120*/ LDG.E R0, [R2.64] ; /* 0x0000000802007981 */
/* 0x000ea2000c1e1900 */
/*0130*/ IADD3 R4, P0, R10.reuse, -0x1, RZ ; /* 0xffffffff0a047810 */
/* 0x040fe20007f1e0ff */
/*0140*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fe20008000000 */
/*0150*/ LOP3.LUT R10, R10, 0x3, RZ, 0xc0, !PT ; /* 0x000000030a0a7812 */
/* 0x000fe200078ec0ff */
/*0160*/ UMOV UR5, URZ ; /* 0x0000003f00057c82 */
/* 0x000fe20008000000 */
/*0170*/ ISETP.GE.U32.AND P1, PT, R4, 0x3, PT ; /* 0x000000030400780c */
/* 0x000fe20003f26070 */
/*0180*/ HFMA2.MMA R7, -RZ, RZ, 1.875, 0 ; /* 0x3f800000ff077435 */
/* 0x000fe200000001ff */
/*0190*/ IADD3.X R4, R5, -0x1, RZ, P0, !PT ; /* 0xffffffff05047810 */
/* 0x000fe400007fe4ff */
/*01a0*/ ISETP.NE.U32.AND P0, PT, R10, RZ, PT ; /* 0x000000ff0a00720c */
/* 0x000fc40003f05070 */
/*01b0*/ ISETP.GE.U32.AND.EX P1, PT, R4, RZ, PT, P1 ; /* 0x000000ff0400720c */
/* 0x000fe40003f26110 */
/*01c0*/ ISETP.NE.AND.EX P0, PT, RZ, RZ, PT, P0 ; /* 0x000000ffff00720c */
/* 0x000fe20003f05300 */
/*01d0*/ IMAD.MOV.U32 R6, RZ, RZ, R0 ; /* 0x000000ffff067224 */
/* 0x004fd400078e0000 */
/*01e0*/ @!P1 BRA 0x400 ; /* 0x0000021000009947 */
/* 0x000fea0003800000 */
/*01f0*/ IADD3 R12, P1, R10, -c[0x0][0x178], RZ ; /* 0x80005e000a0c7a10 */
/* 0x000fe20007f3e0ff */
/*0200*/ IMAD.MOV.U32 R7, RZ, RZ, 0x3f800000 ; /* 0x3f800000ff077424 */
/* 0x000fe200078e00ff */
/*0210*/ MOV R4, c[0x0][0x168] ; /* 0x00005a0000047a02 */
/* 0x000fe20000000f00 */
/*0220*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff057624 */
/* 0x000fe200078e00ff */
/*0230*/ IADD3.X R14, RZ, ~c[0x0][0x17c], RZ, P1, !PT ; /* 0x80005f00ff0e7a10 */
/* 0x000fe20000ffe4ff */
/*0240*/ IMAD.MOV.U32 R6, RZ, RZ, R0 ; /* 0x000000ffff067224 */
/* 0x000fe200078e0000 */
/*0250*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fe40008000000 */
/*0260*/ UMOV UR5, URZ ; /* 0x0000003f00057c82 */
/* 0x000fe40008000000 */
/*0270*/ LDG.E R8, [R4.64] ; /* 0x0000000804087981 */
/* 0x000ea4000c1e1900 */
/*0280*/ FFMA R9, R8, R7, R6 ; /* 0x0000000708097223 */
/* 0x005fca0000000006 */
/*0290*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */
/* 0x0001e8000c101908 */
/*02a0*/ LDG.E R6, [R4.64+0x4] ; /* 0x0000040804067981 */
/* 0x000ea2000c1e1900 */
/*02b0*/ FMUL R7, R0, R7 ; /* 0x0000000700077220 */
/* 0x000fc80000400000 */
/*02c0*/ FFMA R11, R6, R7, R9 ; /* 0x00000007060b7223 */
/* 0x004fca0000000009 */
/*02d0*/ STG.E [R2.64], R11 ; /* 0x0000000b02007986 */
/* 0x0001e8000c101908 */
/*02e0*/ LDG.E R6, [R4.64+0x8] ; /* 0x0000080804067981 */
/* 0x000ea2000c1e1900 */
/*02f0*/ FMUL R7, R0, R7 ; /* 0x0000000700077220 */
/* 0x000fe20000400000 */
/*0300*/ UIADD3 UR4, UP0, UR4, 0x4, URZ ; /* 0x0000000404047890 */
/* 0x000fc8000ff1e03f */
/*0310*/ UIADD3.X UR5, URZ, UR5, URZ, UP0, !UPT ; /* 0x000000053f057290 */
/* 0x000fe400087fe43f */
/*0320*/ IADD3 R8, P2, R12, UR4, RZ ; /* 0x000000040c087c10 */
/* 0x000fe2000ff5e0ff */
/*0330*/ FFMA R13, R7, R6, R11 ; /* 0x00000006070d7223 */
/* 0x004fca000000000b */
/*0340*/ STG.E [R2.64], R13 ; /* 0x0000000d02007986 */
/* 0x0001e8000c101908 */
/*0350*/ LDG.E R6, [R4.64+0xc] ; /* 0x00000c0804067981 */
/* 0x0002a2000c1e1900 */
/*0360*/ ISETP.NE.U32.AND P1, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe20003f25070 */
/*0370*/ FMUL R7, R0, R7 ; /* 0x0000000700077220 */
/* 0x000fe20000400000 */
/*0380*/ IADD3.X R8, R14, UR5, RZ, P2, !PT ; /* 0x000000050e087c10 */
/* 0x000fc800097fe4ff */
/*0390*/ ISETP.NE.AND.EX P1, PT, R8, RZ, PT, P1 ; /* 0x000000ff0800720c */
/* 0x000fe40003f25310 */
/*03a0*/ IADD3 R4, P2, R4, 0x10, RZ ; /* 0x0000001004047810 */
/* 0x002fca0007f5e0ff */
/*03b0*/ IMAD.X R5, RZ, RZ, R5, P2 ; /* 0x000000ffff057224 */
/* 0x000fe400010e0605 */
/*03c0*/ FFMA R6, R7, R6, R13 ; /* 0x0000000607067223 */
/* 0x004fe4000000000d */
/*03d0*/ FMUL R7, R0, R7 ; /* 0x0000000700077220 */
/* 0x000fc60000400000 */
/*03e0*/ STG.E [R2.64], R6 ; /* 0x0000000602007986 */
/* 0x0001e2000c101908 */
/*03f0*/ @P1 BRA 0x270 ; /* 0xfffffe7000001947 */
/* 0x000fea000383ffff */
/*0400*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0410*/ IADD3 R8, P0, RZ, -R10, RZ ; /* 0x8000000aff087210 */
/* 0x000fe20007f1e0ff */
/*0420*/ ULDC.64 UR6, c[0x0][0x168] ; /* 0x00005a0000067ab9 */
/* 0x000fe40000000a00 */
/*0430*/ ULEA UR6, UP0, UR4, UR6, 0x2 ; /* 0x0000000604067291 */
/* 0x000fe4000f80103f */
/*0440*/ IMAD.X R9, RZ, RZ, -0x1, P0 ; /* 0xffffffffff097424 */
/* 0x001fe400000e06ff */
/*0450*/ ULEA.HI.X UR4, UR4, UR7, UR5, 0x2, UP0 ; /* 0x0000000704047291 */
/* 0x000fca00080f1405 */
/*0460*/ MOV R4, UR6 ; /* 0x0000000600047c02 */
/* 0x000fe20008000f00 */
/*0470*/ IMAD.U32 R5, RZ, RZ, UR4 ; /* 0x00000004ff057e24 */
/* 0x000fca000f8e00ff */
/*0480*/ LDG.E R4, [R4.64] ; /* 0x0000000804047981 */
/* 0x000ea2000c1e1900 */
/*0490*/ IADD3 R8, P0, R8, 0x1, RZ ; /* 0x0000000108087810 */
/* 0x000fe20007f1e0ff */
/*04a0*/ UIADD3 UR6, UP0, UR6, 0x4, URZ ; /* 0x0000000406067890 */
/* 0x000fc6000ff1e03f */
/*04b0*/ IADD3.X R9, RZ, R9, RZ, P0, !PT ; /* 0x00000009ff097210 */
/* 0x000fe200007fe4ff */
/*04c0*/ UIADD3.X UR4, URZ, UR4, URZ, UP0, !UPT ; /* 0x000000043f047290 */
/* 0x000fe200087fe43f */
/*04d0*/ ISETP.NE.U32.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fc80003f05070 */
/*04e0*/ ISETP.NE.AND.EX P0, PT, R9, RZ, PT, P0 ; /* 0x000000ff0900720c */
/* 0x000fe20003f05300 */
/*04f0*/ FFMA R6, R4, R7.reuse, R6 ; /* 0x0000000704067223 */
/* 0x085fe40000000006 */
/*0500*/ FMUL R7, R0, R7 ; /* 0x0000000700077220 */
/* 0x000fc60000400000 */
/*0510*/ STG.E [R2.64], R6 ; /* 0x0000000602007986 */
/* 0x0001ee000c101908 */
/*0520*/ @P0 BRA 0x460 ; /* 0xffffff3000000947 */
/* 0x000fea000383ffff */
/*0530*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0540*/ BRA 0x540; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0550*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0560*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0580*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0590*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9expansionPfS_xx
.globl _Z9expansionPfS_xx
.p2align 8
.type _Z9expansionPfS_xx,@function
_Z9expansionPfS_xx:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x2c
s_load_b64 s[2:3], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
v_mov_b32_e32 v2, 0
v_cmp_gt_i64_e32 vcc_lo, s[2:3], v[1:2]
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_4
s_load_b64 s[4:5], s[0:1], 0x18
s_waitcnt lgkmcnt(0)
v_cmp_lt_i64_e64 s2, s[4:5], 1
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 vcc_lo, exec_lo, s2
s_cbranch_vccnz .LBB0_4
s_load_b128 s[0:3], s[0:1], 0x0
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_dual_mov_b32 v3, 1.0 :: v_dual_mov_b32 v4, 0
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_mov_b64 s[0:1], 0
global_load_b32 v2, v[0:1], off
s_waitcnt vmcnt(0)
v_mov_b32_e32 v5, v2
.LBB0_3:
global_load_b32 v6, v4, s[2:3]
s_add_u32 s0, s0, 1
s_addc_u32 s1, s1, 0
s_add_u32 s2, s2, 4
v_cmp_lt_i64_e64 s6, s[0:1], s[4:5]
s_addc_u32 s3, s3, 0
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 vcc_lo, exec_lo, s6
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v5, v3, v6
v_mul_f32_e32 v3, v2, v3
global_store_b32 v[0:1], v5, off
s_cbranch_vccnz .LBB0_3
.LBB0_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9expansionPfS_xx
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9expansionPfS_xx, .Lfunc_end0-_Z9expansionPfS_xx
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 8
.value_kind: by_value
- .offset: 24
.size: 8
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9expansionPfS_xx
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9expansionPfS_xx.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0009e172_00000000-6_polyExp.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3778:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3778:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z35__device_stub__Z9expansionPfS_xxiiiPfS_xxiii
.type _Z35__device_stub__Z9expansionPfS_xxiiiPfS_xxiii, @function
_Z35__device_stub__Z9expansionPfS_xxiiiPfS_xxiii:
.LFB3800:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
leaq 192(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z9expansionPfS_xxiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3800:
.size _Z35__device_stub__Z9expansionPfS_xxiiiPfS_xxiii, .-_Z35__device_stub__Z9expansionPfS_xxiiiPfS_xxiii
.globl _Z9expansionPfS_xxiii
.type _Z9expansionPfS_xxiii, @function
_Z9expansionPfS_xxiii:
.LFB3801:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z35__device_stub__Z9expansionPfS_xxiiiPfS_xxiii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3801:
.size _Z9expansionPfS_xxiii, .-_Z9expansionPfS_xxiii
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "\t"
.text
.globl main
.type main, @function
main:
.LFB3768:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
movq %rsp, %rbp
.cfi_def_cfa_register 6
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $216, %rsp
.cfi_offset 15, -24
.cfi_offset 14, -32
.cfi_offset 13, -40
.cfi_offset 12, -48
.cfi_offset 3, -56
movq %rsi, %rbx
movq %fs:40, %rax
movq %rax, -56(%rbp)
xorl %eax, %eax
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtoll@PLT
movq %rax, %r13
movq %rax, -120(%rbp)
movq 16(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtoll@PLT
movq %rax, %r12
movq %rax, -216(%rbp)
movq 24(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r15
movq %rax, -184(%rbp)
movl %eax, -232(%rbp)
movq 32(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r14
movq 40(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %rcx
movq %rax, -176(%rbp)
movl %eax, -228(%rbp)
movslq %r15d, %rbx
movq %rbx, -152(%rbp)
movslq %eax, %rdi
movq %rdi, -160(%rbp)
movl %r15d, %eax
subl $1, %eax
cltq
addq %r13, %rax
cqto
idivq %rbx
leal -1(%rcx), %edx
movslq %edx, %rdx
addq %rdx, %rax
cqto
idivq %rdi
movq %rax, -144(%rbp)
movl %r15d, %eax
imull %ecx, %eax
movslq %eax, %r15
leaq 0(,%r15,4), %rax
movq %rax, -128(%rbp)
leaq 4(,%r12,4), %r13
movslq %r14d, %rax
movq %rax, -136(%rbp)
leaq 15(,%rax,8), %rax
movq %rax, %rcx
andq $-16, %rcx
andq $-4096, %rax
movq %rsp, %rdx
subq %rax, %rdx
.L12:
cmpq %rdx, %rsp
je .L13
subq $4096, %rsp
orq $0, 4088(%rsp)
jmp .L12
.L13:
movq %rcx, %rax
andl $4095, %eax
subq %rax, %rsp
testq %rax, %rax
je .L14
orq $0, -8(%rsp,%rax)
.L14:
movq %rsp, %rcx
movq %rcx, -168(%rbp)
testl %r14d, %r14d
jle .L15
movq %rcx, %rbx
leal -1(%r14), %eax
leaq 8(%rcx,%rax,8), %r12
.L16:
movq %rbx, %rdi
call cudaStreamCreate@PLT
addq $8, %rbx
cmpq %r12, %rbx
jne .L16
.L15:
movq -120(%rbp), %rbx
leaq 0(,%rbx,4), %rsi
leaq -112(%rbp), %rdi
call cudaMallocHost@PLT
leaq -96(%rbp), %rdi
movq %r13, %rsi
call cudaMallocHost@PLT
movq -136(%rbp), %rsi
movq -128(%rbp), %rax
imulq %rax, %rsi
leaq -104(%rbp), %rdi
call cudaMalloc@PLT
leaq -88(%rbp), %rdi
movq %r13, %rsi
call cudaMalloc@PLT
movq %rbx, %rcx
testq %rbx, %rbx
jle .L17
movl $0, %eax
movss .LC0(%rip), %xmm0
.L18:
movq -112(%rbp), %rdx
movss %xmm0, (%rdx,%rax,4)
addq $1, %rax
cmpq %rcx, %rax
jne .L18
.L17:
movq -216(%rbp), %rax
testq %rax, %rax
js .L19
leaq 1(%rax), %rcx
movl $0, %eax
movss .LC0(%rip), %xmm0
.L20:
movq -96(%rbp), %rdx
movss %xmm0, (%rdx,%rax,4)
addq $1, %rax
cmpq %rcx, %rax
jne .L20
.L19:
call _ZNSt6chrono3_V212steady_clock3nowEv@PLT
movq %rax, -240(%rbp)
movl $0, %r8d
movl $1, %ecx
movq %r13, %rdx
movq -96(%rbp), %rsi
movq -88(%rbp), %rdi
call cudaMemcpyAsync@PLT
cmpq $0, -144(%rbp)
jle .L21
movq -152(%rbp), %rax
movq -160(%rbp), %rcx
imulq %rcx, %rax
movq %rax, -192(%rbp)
movq -120(%rbp), %rax
addq %r15, %rax
salq $2, %rax
movq %rax, -224(%rbp)
movq %r15, %rax
negq %rax
salq $2, %rax
movq %rax, -200(%rbp)
movl $0, %r12d
movq %r14, -248(%rbp)
jmp .L26
.L23:
addq -104(%rbp), %rbx
movq %rbx, %rsi
movq %r13, %rdi
addq -112(%rbp), %rdi
movq (%r14), %r8
movl $2, %ecx
movq -128(%rbp), %rdx
call cudaMemcpyAsync@PLT
.L24:
addq $1, %r12
movq -192(%rbp), %rax
addq %rax, %r15
cmpq %r12, -144(%rbp)
je .L38
.L26:
cmpq %r15, -120(%rbp)
jle .L22
movq %r12, %rax
cqto
idivq -136(%rbp)
movq -168(%rbp), %rax
leaq (%rax,%rdx,8), %r14
movq -200(%rbp), %rax
leaq (%rax,%r15,4), %r13
movq -160(%rbp), %rax
imulq %rax, %rdx
movq -152(%rbp), %rax
imulq %rax, %rdx
leaq 0(,%rdx,4), %rbx
movq %r13, %rsi
addq -112(%rbp), %rsi
movq %rbx, %rdi
addq -104(%rbp), %rdi
movq (%r14), %r8
movl $1, %ecx
movq -128(%rbp), %rdx
call cudaMemcpyAsync@PLT
movl -184(%rbp), %eax
movl %eax, -68(%rbp)
movl $1, -64(%rbp)
movl $1, -60(%rbp)
movl -176(%rbp), %eax
movl %eax, -80(%rbp)
movl $1, -76(%rbp)
movl $1, -72(%rbp)
movq (%r14), %r9
movl $0, %r8d
movq -68(%rbp), %rdx
movl $1, %ecx
movq -80(%rbp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L23
movq %rbx, %rdi
addq -104(%rbp), %rdi
subq $8, %rsp
movl -232(%rbp), %eax
pushq %rax
movl -228(%rbp), %r9d
movl %r12d, %r8d
movq -216(%rbp), %rcx
movq -120(%rbp), %rdx
movq -88(%rbp), %rsi
call _Z35__device_stub__Z9expansionPfS_xxiiiPfS_xxiii
addq $16, %rsp
jmp .L23
.L22:
movq %r12, %rax
cqto
idivq -136(%rbp)
movq -168(%rbp), %rax
leaq (%rax,%rdx,8), %r14
movq %r15, %rax
negq %rax
movq -224(%rbp), %rcx
leaq (%rcx,%rax,4), %rax
movq %rax, -208(%rbp)
movq -200(%rbp), %rcx
leaq (%rcx,%r15,4), %r13
movq -160(%rbp), %rcx
imulq %rcx, %rdx
movq -152(%rbp), %rcx
imulq %rcx, %rdx
leaq 0(,%rdx,4), %rbx
movq %r13, %rsi
addq -112(%rbp), %rsi
movq %rbx, %rdi
addq -104(%rbp), %rdi
movq (%r14), %r8
movl $1, %ecx
movq %rax, %rdx
call cudaMemcpyAsync@PLT
movl -184(%rbp), %eax
movl %eax, -68(%rbp)
movl $1, -64(%rbp)
movl $1, -60(%rbp)
movl -176(%rbp), %eax
movl %eax, -80(%rbp)
movl $1, -76(%rbp)
movl $1, -72(%rbp)
movq (%r14), %r9
movl $0, %r8d
movq -68(%rbp), %rdx
movl $1, %ecx
movq -80(%rbp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L39
.L25:
addq -104(%rbp), %rbx
movq %rbx, %rsi
movq %r13, %rdi
addq -112(%rbp), %rdi
movq (%r14), %r8
movl $2, %ecx
movq -208(%rbp), %rdx
call cudaMemcpyAsync@PLT
jmp .L24
.L39:
movq %rbx, %rdi
addq -104(%rbp), %rdi
subq $8, %rsp
movl -232(%rbp), %eax
pushq %rax
movl -228(%rbp), %r9d
movl %r12d, %r8d
movq -216(%rbp), %rcx
movq -120(%rbp), %rdx
movq -88(%rbp), %rsi
call _Z35__device_stub__Z9expansionPfS_xxiiiPfS_xxiii
addq $16, %rsp
jmp .L25
.L38:
movq -248(%rbp), %r14
.L21:
call cudaDeviceSynchronize@PLT
movl $70, %esi
leaq _ZSt4cout(%rip), %r13
movq %r13, %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC1(%rip), %rbx
movq %rbx, %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %r12
call cudaGetLastError@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rsi
movq %r12, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
call _ZNSt6chrono3_V212steady_clock3nowEv@PLT
movq -240(%rbp), %rsi
subq %rsi, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
divsd .LC2(%rip), %xmm0
movsd %xmm0, -128(%rbp)
movq -120(%rbp), %r12
movq %r12, %rsi
movq %r13, %rdi
call _ZNSo9_M_insertIxEERSoT_@PLT
movq %rax, %rdi
movq %rbx, %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movq -216(%rbp), %r15
movq %r15, %rsi
call _ZNSo9_M_insertIxEERSoT_@PLT
movq %rax, %rdi
movq %rbx, %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movq %r15, %rdx
imulq %r12, %rdx
salq $2, %rdx
js .L27
pxor %xmm0, %xmm0
cvtsi2sdq %rdx, %xmm0
.L28:
divsd -128(%rbp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
testl %r14d, %r14d
jle .L29
movq -168(%rbp), %rsi
movq %rsi, %rbx
leal -1(%r14), %eax
leaq 8(%rsi,%rax,8), %r12
.L30:
movq (%rbx), %rdi
call cudaStreamDestroy@PLT
addq $8, %rbx
cmpq %r12, %rbx
jne .L30
.L29:
movq -104(%rbp), %rdi
call cudaFree@PLT
movq -88(%rbp), %rdi
call cudaFree@PLT
movq -56(%rbp), %rax
subq %fs:40, %rax
jne .L40
movl $0, %eax
leaq -40(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
.cfi_remember_state
.cfi_def_cfa 7, 8
ret
.L27:
.cfi_restore_state
shrq %rdx
pxor %xmm0, %xmm0
cvtsi2sdq %rdx, %xmm0
addsd %xmm0, %xmm0
jmp .L28
.L40:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3768:
.size main, .-main
.section .rodata.str1.1
.LC3:
.string "_Z9expansionPfS_xxiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3803:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z9expansionPfS_xxiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3803:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1065353216
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC2:
.long 0
.long 1104006501
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "polyExp.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z24__device_stub__expansionPfS_xx # -- Begin function _Z24__device_stub__expansionPfS_xx
.p2align 4, 0x90
.type _Z24__device_stub__expansionPfS_xx,@function
_Z24__device_stub__expansionPfS_xx: # @_Z24__device_stub__expansionPfS_xx
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movq %rcx, 48(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rax
movq %rax, 104(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9expansionPfS_xx, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z24__device_stub__expansionPfS_xx, .Lfunc_end0-_Z24__device_stub__expansionPfS_xx
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI1_0:
.quad 0x41cdcd6500000000 # double 1.0E+9
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $184, %rsp
.cfi_def_cfa_offset 240
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
movq 8(%rsi), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, 56(%rsp) # 8-byte Spill
movslq %eax, %r15
movq 16(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
cltq
movq 24(%rbx), %rdi
movq %rax, %rbx
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, 40(%rsp) # 8-byte Spill
leaq 4(,%rbx,4), %r14
leaq (,%r15,4), %rbp
movq %rbp, %rdi
callq malloc
movq %rax, %r13
movq %r14, %rdi
callq malloc
movq %rax, %r12
leaq 16(%rsp), %rdi
movq %rbp, 8(%rsp) # 8-byte Spill
movq %rbp, %rsi
callq hipMalloc
leaq 32(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
leaq 176(%rsp), %rdi
movl $4, %esi
callq hipMalloc
testq %r15, %r15
jle .LBB1_3
# %bb.1: # %.lr.ph.preheader
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl $1065353216, (%r13,%rax,4) # imm = 0x3F800000
incq %rax
cmpq %rax, %r15
jne .LBB1_2
.LBB1_3: # %.preheader
testq %rbx, %rbx
js .LBB1_6
# %bb.4: # %.lr.ph55.preheader
leaq 1(%rbx), %rax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB1_5: # %.lr.ph55
# =>This Inner Loop Header: Depth=1
movl $1073741824, (%r12,%rcx,4) # imm = 0x40000000
incq %rcx
cmpq %rcx, %rax
jne .LBB1_5
.LBB1_6: # %._crit_edge
movq %rbx, 24(%rsp) # 8-byte Spill
callq _ZNSt6chrono3_V212steady_clock3nowEv
movq %rax, 48(%rsp) # 8-byte Spill
movq 16(%rsp), %rdi
movq %r13, %rsi
movq 8(%rsp), %rbp # 8-byte Reload
movq %rbp, %rdx
movl $1, %ecx
callq hipMemcpy
callq hipDeviceSynchronize
callq _ZNSt6chrono3_V212steady_clock3nowEv
movq %rax, %rbx
movq 32(%rsp), %rdi
movq %r12, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movq 40(%rsp), %rcx # 8-byte Reload
leal -1(%rcx), %eax
cltq
addq %r15, %rax
movslq %ecx, %rcx
cqto
idivq %rcx
movl %eax, %edi
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %rdi
movl %ecx, %edx
orq %rax, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_8
# %bb.7:
movq 16(%rsp), %rax
movq 32(%rsp), %rcx
movq %rax, 136(%rsp)
movq %rcx, 128(%rsp)
movq %r15, 120(%rsp)
movq 24(%rsp), %rax # 8-byte Reload
movq %rax, 112(%rsp)
leaq 136(%rsp), %rax
movq %rax, 144(%rsp)
leaq 128(%rsp), %rax
movq %rax, 152(%rsp)
leaq 120(%rsp), %rax
movq %rax, 160(%rsp)
leaq 112(%rsp), %rax
movq %rax, 168(%rsp)
leaq 96(%rsp), %rdi
leaq 80(%rsp), %rsi
leaq 72(%rsp), %rdx
leaq 64(%rsp), %rcx
callq __hipPopCallConfiguration
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
movq 80(%rsp), %rcx
movl 88(%rsp), %r8d
leaq 144(%rsp), %r9
movl $_Z9expansionPfS_xx, %edi
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_8:
callq hipGetLastError
movl %eax, %edi
callq hipGetErrorString
movq %rax, %rdi
callq puts@PLT
movq 16(%rsp), %rsi
movq %r13, %rdi
movq %rbp, %rdx
movl $2, %ecx
callq hipMemcpy
subq 48(%rsp), %rbx # 8-byte Folded Reload
cvtsi2sd %rbx, %xmm0
divsd .LCPI1_0(%rip), %xmm0
movsd %xmm0, 8(%rsp) # 8-byte Spill
callq hipGetLastError
movl %eax, %edi
callq hipGetErrorString
movq %rax, %rdi
callq puts@PLT
movl $_ZSt4cout, %edi
movq %r15, %rsi
callq _ZNSo9_M_insertIxEERSoT_
movq %rax, %rbx
movl $.L.str.1, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %rbx, %rdi
movq 24(%rsp), %rsi # 8-byte Reload
callq _ZNSo9_M_insertIxEERSoT_
movq %rax, %rbx
movl $.L.str.1, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
xorps %xmm0, %xmm0
cvtsi2sdl 56(%rsp), %xmm0 # 4-byte Folded Reload
divsd 8(%rsp), %xmm0 # 8-byte Folded Reload
movq %rbx, %rdi
callq _ZNSo9_M_insertIdEERSoT_
movl $.L.str.2, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
xorl %eax, %eax
addq $184, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9expansionPfS_xx, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9expansionPfS_xx,@object # @_Z9expansionPfS_xx
.section .rodata,"a",@progbits
.globl _Z9expansionPfS_xx
.p2align 3, 0x0
_Z9expansionPfS_xx:
.quad _Z24__device_stub__expansionPfS_xx
.size _Z9expansionPfS_xx, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "\t"
.size .L.str.1, 2
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "\n"
.size .L.str.2, 2
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z9expansionPfS_xx"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__expansionPfS_xx
.addrsig_sym __gxx_personality_v0
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9expansionPfS_xx
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include<stdio.h>
#include<iostream>
//using the struct of cudaDeviceProp getting the information of gpu
void printDeviceProp(cudaDeviceProp devProp){
printf("Name: %s\n", devProp.name);
printf("Maximum thread per block: %d\n", devProp.maxThreadsPerBlock);
for(int i = 0; i < 3; i++)
printf("Maximum dimension of block: %d\t %d\n",i, devProp.maxThreadsDim[i]);
return;
}
//A single thread for each of the n elements, and each thread computes its array index using blockIdx.x*blockDim.x + threadIdx.x.
__global__ void saxpy(int n, float a, float *x, float *y){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n){
y[i] = a*x[i] + y[i];
__syncthreads();//syncronizing threads in kernel calls
}
}
int main(int argc, char* argv[]){
int devCount;
int N;
float A;
float* d_x;
float* d_y;
//N = atoi(argv[1]);
//A = atoi(argv[2]);
cudaGetDeviceCount(&devCount);
printf("%d Cuda devices\n", devCount);
for(int k = 0; k < devCount; ++k){
printf("\nCuda Device %d\n",k);
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, k);
printDeviceProp(devProp);
}
printf("Size of array N: ");
scanf("%d", &N);
printf("Size of scalar value A: ");
scanf("%f", &A);
//Allocates space from host memory
float* h_x = (float*)malloc(N*sizeof(float));
float* h_y = (float*)malloc(N*sizeof(float));
cudaMalloc((void**)&d_x, N*sizeof(float));
cudaMalloc((void**)&d_y, N*sizeof(float));
for(int i=0; i < N; i++){
//Generates random values between 0 and 256 and assigns to allocated space
h_x[i] = (float) (rand() % 256);
h_y[i] = (float) (rand() % 256);
printf("x = %f\n",h_x[i]);
printf("y = %f\n",h_y[i]);
}
//Sends from host to device
cudaMemcpy(d_x, h_x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, h_y, N*sizeof(float), cudaMemcpyHostToDevice);
int blocks = (N + 255)/256;
printf("Block number: %d\n",blocks);
//calls the kernel function
//The first argument in the execution specifies the number of thread blocks in the grid, and the second specifies the number of threads in a thread block.
saxpy<<<blocks,256>>>(N, A, d_x, d_y);
//returns the value from device to host
cudaMemcpy(h_y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
//Printing the host value after returned from device to host
for(int i=0; i < N; i++){
printf("y = %f\n",h_y[i]);
}
cudaFree(d_x);
cudaFree(d_y);
free(h_x);
free(h_y);
return 0;
} | code for sm_80
Function : _Z5saxpyifPfS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x160], PT ; /* 0x0000580004007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x000fc800078e0205 */
/*0090*/ IMAD.WIDE R4, R4, R5, c[0x0][0x170] ; /* 0x00005c0004047625 */
/* 0x000fe400078e0205 */
/*00a0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R7, [R4.64] ; /* 0x0000000404077981 */
/* 0x000ea4000c1e1900 */
/*00c0*/ FFMA R7, R2, c[0x0][0x164], R7 ; /* 0x0000590002077a23 */
/* 0x004fca0000000007 */
/*00d0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe8000c101904 */
/*00e0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include<stdio.h>
#include<iostream>
//using the struct of cudaDeviceProp getting the information of gpu
void printDeviceProp(cudaDeviceProp devProp){
printf("Name: %s\n", devProp.name);
printf("Maximum thread per block: %d\n", devProp.maxThreadsPerBlock);
for(int i = 0; i < 3; i++)
printf("Maximum dimension of block: %d\t %d\n",i, devProp.maxThreadsDim[i]);
return;
}
//A single thread for each of the n elements, and each thread computes its array index using blockIdx.x*blockDim.x + threadIdx.x.
__global__ void saxpy(int n, float a, float *x, float *y){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n){
y[i] = a*x[i] + y[i];
__syncthreads();//syncronizing threads in kernel calls
}
}
int main(int argc, char* argv[]){
int devCount;
int N;
float A;
float* d_x;
float* d_y;
//N = atoi(argv[1]);
//A = atoi(argv[2]);
cudaGetDeviceCount(&devCount);
printf("%d Cuda devices\n", devCount);
for(int k = 0; k < devCount; ++k){
printf("\nCuda Device %d\n",k);
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, k);
printDeviceProp(devProp);
}
printf("Size of array N: ");
scanf("%d", &N);
printf("Size of scalar value A: ");
scanf("%f", &A);
//Allocates space from host memory
float* h_x = (float*)malloc(N*sizeof(float));
float* h_y = (float*)malloc(N*sizeof(float));
cudaMalloc((void**)&d_x, N*sizeof(float));
cudaMalloc((void**)&d_y, N*sizeof(float));
for(int i=0; i < N; i++){
//Generates random values between 0 and 256 and assigns to allocated space
h_x[i] = (float) (rand() % 256);
h_y[i] = (float) (rand() % 256);
printf("x = %f\n",h_x[i]);
printf("y = %f\n",h_y[i]);
}
//Sends from host to device
cudaMemcpy(d_x, h_x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, h_y, N*sizeof(float), cudaMemcpyHostToDevice);
int blocks = (N + 255)/256;
printf("Block number: %d\n",blocks);
//calls the kernel function
//The first argument in the execution specifies the number of thread blocks in the grid, and the second specifies the number of threads in a thread block.
saxpy<<<blocks,256>>>(N, A, d_x, d_y);
//returns the value from device to host
cudaMemcpy(h_y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
//Printing the host value after returned from device to host
for(int i=0; i < N; i++){
printf("y = %f\n",h_y[i]);
}
cudaFree(d_x);
cudaFree(d_y);
free(h_x);
free(h_y);
return 0;
} | .file "tmpxft_00077f13_00000000-6_saxpy.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3673:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3673:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Name: %s\n"
.LC1:
.string "Maximum thread per block: %d\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "Maximum dimension of block: %d\t %d\n"
.text
.globl _Z15printDeviceProp14cudaDeviceProp
.type _Z15printDeviceProp14cudaDeviceProp, @function
_Z15printDeviceProp14cudaDeviceProp:
.LFB3669:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
leaq 32(%rsp), %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 352(%rsp), %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %ebx
leaq .LC2(%rip), %rbp
.L4:
movl 356(%rsp,%rbx,4), %ecx
movl %ebx, %edx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $1, %rbx
cmpq $3, %rbx
jne .L4
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3669:
.size _Z15printDeviceProp14cudaDeviceProp, .-_Z15printDeviceProp14cudaDeviceProp
.globl _Z28__device_stub__Z5saxpyifPfS_ifPfS_
.type _Z28__device_stub__Z5saxpyifPfS_ifPfS_, @function
_Z28__device_stub__Z5saxpyifPfS_ifPfS_:
.LFB3695:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movss %xmm0, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z5saxpyifPfS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3695:
.size _Z28__device_stub__Z5saxpyifPfS_ifPfS_, .-_Z28__device_stub__Z5saxpyifPfS_ifPfS_
.globl _Z5saxpyifPfS_
.type _Z5saxpyifPfS_, @function
_Z5saxpyifPfS_:
.LFB3696:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z28__device_stub__Z5saxpyifPfS_ifPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3696:
.size _Z5saxpyifPfS_, .-_Z5saxpyifPfS_
.section .rodata.str1.1
.LC3:
.string "%d Cuda devices\n"
.LC4:
.string "\nCuda Device %d\n"
.LC5:
.string "Size of array N: "
.LC6:
.string "%d"
.LC7:
.string "Size of scalar value A: "
.LC8:
.string "%f"
.LC9:
.string "x = %f\n"
.LC10:
.string "y = %f\n"
.LC11:
.string "Block number: %d\n"
.text
.globl main
.type main, @function
main:
.LFB3670:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $1104, %rsp
.cfi_def_cfa_offset 1152
movq %fs:40, %rax
movq %rax, 1096(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rdi
call cudaGetDeviceCount@PLT
movl 12(%rsp), %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, 12(%rsp)
jle .L16
movl $0, %ebx
leaq .LC4(%rip), %r12
.L17:
movl %ebx, %edx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 64(%rsp), %rbp
movl %ebx, %esi
movq %rbp, %rdi
call cudaGetDeviceProperties_v2@PLT
subq $1040, %rsp
.cfi_def_cfa_offset 2192
movl $129, %ecx
movq %rsp, %rdi
movq %rbp, %rsi
rep movsq
call _Z15printDeviceProp14cudaDeviceProp
addl $1, %ebx
addq $1040, %rsp
.cfi_def_cfa_offset 1152
cmpl %ebx, 12(%rsp)
jg .L17
.L16:
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 16(%rsp), %rsi
leaq .LC6(%rip), %rdi
movl $0, %eax
call __isoc23_scanf@PLT
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 20(%rsp), %rsi
leaq .LC8(%rip), %rdi
movl $0, %eax
call __isoc23_scanf@PLT
movslq 16(%rsp), %rbx
salq $2, %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, %r12
movq %rbx, %rdi
call malloc@PLT
movq %rax, %rbp
leaq 24(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movslq 16(%rsp), %rsi
salq $2, %rsi
leaq 32(%rsp), %rdi
call cudaMalloc@PLT
movl 16(%rsp), %edx
testl %edx, %edx
jle .L18
movl $0, %ebx
leaq .LC9(%rip), %r14
leaq .LC10(%rip), %r13
.L19:
call rand@PLT
cltd
shrl $24, %edx
addl %edx, %eax
movzbl %al, %eax
subl %edx, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, (%r12,%rbx,4)
call rand@PLT
cltd
shrl $24, %edx
addl %edx, %eax
movzbl %al, %eax
subl %edx, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, 0(%rbp,%rbx,4)
pxor %xmm0, %xmm0
cvtss2sd (%r12,%rbx,4), %xmm0
movq %r14, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
pxor %xmm0, %xmm0
cvtss2sd 0(%rbp,%rbx,4), %xmm0
movq %r13, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl 16(%rsp), %edx
addq $1, %rbx
cmpl %ebx, %edx
jg .L19
.L18:
movslq %edx, %rdx
salq $2, %rdx
movl $1, %ecx
movq %r12, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movslq 16(%rsp), %rdx
salq $2, %rdx
movl $1, %ecx
movq %rbp, %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
movl 16(%rsp), %eax
leal 510(%rax), %ebx
addl $255, %eax
cmovns %eax, %ebx
sarl $8, %ebx
movl %ebx, %edx
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $256, 52(%rsp)
movl $1, 56(%rsp)
movl %ebx, 40(%rsp)
movl $1, 44(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 52(%rsp), %rdx
movl $1, %ecx
movq 40(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L28
.L20:
movslq 16(%rsp), %rdx
salq $2, %rdx
movl $2, %ecx
movq 32(%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
cmpl $0, 16(%rsp)
jle .L21
movl $0, %ebx
leaq .LC10(%rip), %r13
.L22:
pxor %xmm0, %xmm0
cvtss2sd 0(%rbp,%rbx,4), %xmm0
movq %r13, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $1, %rbx
cmpl %ebx, 16(%rsp)
jg .L22
.L21:
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq %r12, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq 1096(%rsp), %rax
subq %fs:40, %rax
jne .L29
movl $0, %eax
addq $1104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L28:
.cfi_restore_state
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movss 20(%rsp), %xmm0
movl 16(%rsp), %edi
call _Z28__device_stub__Z5saxpyifPfS_ifPfS_
jmp .L20
.L29:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3670:
.size main, .-main
.section .rodata.str1.1
.LC12:
.string "_Z5saxpyifPfS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3698:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _Z5saxpyifPfS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3698:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include<stdio.h>
#include<iostream>
//using the struct of cudaDeviceProp getting the information of gpu
void printDeviceProp(cudaDeviceProp devProp){
printf("Name: %s\n", devProp.name);
printf("Maximum thread per block: %d\n", devProp.maxThreadsPerBlock);
for(int i = 0; i < 3; i++)
printf("Maximum dimension of block: %d\t %d\n",i, devProp.maxThreadsDim[i]);
return;
}
//A single thread for each of the n elements, and each thread computes its array index using blockIdx.x*blockDim.x + threadIdx.x.
__global__ void saxpy(int n, float a, float *x, float *y){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n){
y[i] = a*x[i] + y[i];
__syncthreads();//syncronizing threads in kernel calls
}
}
int main(int argc, char* argv[]){
int devCount;
int N;
float A;
float* d_x;
float* d_y;
//N = atoi(argv[1]);
//A = atoi(argv[2]);
cudaGetDeviceCount(&devCount);
printf("%d Cuda devices\n", devCount);
for(int k = 0; k < devCount; ++k){
printf("\nCuda Device %d\n",k);
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, k);
printDeviceProp(devProp);
}
printf("Size of array N: ");
scanf("%d", &N);
printf("Size of scalar value A: ");
scanf("%f", &A);
//Allocates space from host memory
float* h_x = (float*)malloc(N*sizeof(float));
float* h_y = (float*)malloc(N*sizeof(float));
cudaMalloc((void**)&d_x, N*sizeof(float));
cudaMalloc((void**)&d_y, N*sizeof(float));
for(int i=0; i < N; i++){
//Generates random values between 0 and 256 and assigns to allocated space
h_x[i] = (float) (rand() % 256);
h_y[i] = (float) (rand() % 256);
printf("x = %f\n",h_x[i]);
printf("y = %f\n",h_y[i]);
}
//Sends from host to device
cudaMemcpy(d_x, h_x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, h_y, N*sizeof(float), cudaMemcpyHostToDevice);
int blocks = (N + 255)/256;
printf("Block number: %d\n",blocks);
//calls the kernel function
//The first argument in the execution specifies the number of thread blocks in the grid, and the second specifies the number of threads in a thread block.
saxpy<<<blocks,256>>>(N, A, d_x, d_y);
//returns the value from device to host
cudaMemcpy(h_y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
//Printing the host value after returned from device to host
for(int i=0; i < N; i++){
printf("y = %f\n",h_y[i]);
}
cudaFree(d_x);
cudaFree(d_y);
free(h_x);
free(h_y);
return 0;
} | #include <hip/hip_runtime.h>
#include<stdio.h>
#include<iostream>
//using the struct of cudaDeviceProp getting the information of gpu
void printDeviceProp(hipDeviceProp_t devProp){
printf("Name: %s\n", devProp.name);
printf("Maximum thread per block: %d\n", devProp.maxThreadsPerBlock);
for(int i = 0; i < 3; i++)
printf("Maximum dimension of block: %d\t %d\n",i, devProp.maxThreadsDim[i]);
return;
}
//A single thread for each of the n elements, and each thread computes its array index using blockIdx.x*blockDim.x + threadIdx.x.
__global__ void saxpy(int n, float a, float *x, float *y){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n){
y[i] = a*x[i] + y[i];
__syncthreads();//syncronizing threads in kernel calls
}
}
int main(int argc, char* argv[]){
int devCount;
int N;
float A;
float* d_x;
float* d_y;
//N = atoi(argv[1]);
//A = atoi(argv[2]);
hipGetDeviceCount(&devCount);
printf("%d Cuda devices\n", devCount);
for(int k = 0; k < devCount; ++k){
printf("\nCuda Device %d\n",k);
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, k);
printDeviceProp(devProp);
}
printf("Size of array N: ");
scanf("%d", &N);
printf("Size of scalar value A: ");
scanf("%f", &A);
//Allocates space from host memory
float* h_x = (float*)malloc(N*sizeof(float));
float* h_y = (float*)malloc(N*sizeof(float));
hipMalloc((void**)&d_x, N*sizeof(float));
hipMalloc((void**)&d_y, N*sizeof(float));
for(int i=0; i < N; i++){
//Generates random values between 0 and 256 and assigns to allocated space
h_x[i] = (float) (rand() % 256);
h_y[i] = (float) (rand() % 256);
printf("x = %f\n",h_x[i]);
printf("y = %f\n",h_y[i]);
}
//Sends from host to device
hipMemcpy(d_x, h_x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, h_y, N*sizeof(float), hipMemcpyHostToDevice);
int blocks = (N + 255)/256;
printf("Block number: %d\n",blocks);
//calls the kernel function
//The first argument in the execution specifies the number of thread blocks in the grid, and the second specifies the number of threads in a thread block.
saxpy<<<blocks,256>>>(N, A, d_x, d_y);
//returns the value from device to host
hipMemcpy(h_y, d_y, N*sizeof(float), hipMemcpyDeviceToHost);
//Printing the host value after returned from device to host
for(int i=0; i < N; i++){
printf("y = %f\n",h_y[i]);
}
hipFree(d_x);
hipFree(d_y);
free(h_x);
free(h_y);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include<stdio.h>
#include<iostream>
//using the struct of cudaDeviceProp getting the information of gpu
void printDeviceProp(hipDeviceProp_t devProp){
printf("Name: %s\n", devProp.name);
printf("Maximum thread per block: %d\n", devProp.maxThreadsPerBlock);
for(int i = 0; i < 3; i++)
printf("Maximum dimension of block: %d\t %d\n",i, devProp.maxThreadsDim[i]);
return;
}
//A single thread for each of the n elements, and each thread computes its array index using blockIdx.x*blockDim.x + threadIdx.x.
__global__ void saxpy(int n, float a, float *x, float *y){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n){
y[i] = a*x[i] + y[i];
__syncthreads();//syncronizing threads in kernel calls
}
}
int main(int argc, char* argv[]){
int devCount;
int N;
float A;
float* d_x;
float* d_y;
//N = atoi(argv[1]);
//A = atoi(argv[2]);
hipGetDeviceCount(&devCount);
printf("%d Cuda devices\n", devCount);
for(int k = 0; k < devCount; ++k){
printf("\nCuda Device %d\n",k);
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, k);
printDeviceProp(devProp);
}
printf("Size of array N: ");
scanf("%d", &N);
printf("Size of scalar value A: ");
scanf("%f", &A);
//Allocates space from host memory
float* h_x = (float*)malloc(N*sizeof(float));
float* h_y = (float*)malloc(N*sizeof(float));
hipMalloc((void**)&d_x, N*sizeof(float));
hipMalloc((void**)&d_y, N*sizeof(float));
for(int i=0; i < N; i++){
//Generates random values between 0 and 256 and assigns to allocated space
h_x[i] = (float) (rand() % 256);
h_y[i] = (float) (rand() % 256);
printf("x = %f\n",h_x[i]);
printf("y = %f\n",h_y[i]);
}
//Sends from host to device
hipMemcpy(d_x, h_x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, h_y, N*sizeof(float), hipMemcpyHostToDevice);
int blocks = (N + 255)/256;
printf("Block number: %d\n",blocks);
//calls the kernel function
//The first argument in the execution specifies the number of thread blocks in the grid, and the second specifies the number of threads in a thread block.
saxpy<<<blocks,256>>>(N, A, d_x, d_y);
//returns the value from device to host
hipMemcpy(h_y, d_y, N*sizeof(float), hipMemcpyDeviceToHost);
//Printing the host value after returned from device to host
for(int i=0; i < N; i++){
printf("y = %f\n",h_y[i]);
}
hipFree(d_x);
hipFree(d_y);
free(h_x);
free(h_y);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z5saxpyifPfS_
.globl _Z5saxpyifPfS_
.p2align 8
.type _Z5saxpyifPfS_,@function
_Z5saxpyifPfS_:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x8
v_ashrrev_i32_e32 v2, 31, v1
s_load_b32 s0, s[0:1], 0x4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[0:1], off
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v3, s0, v2
global_store_b32 v[0:1], v3, off
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
.LBB0_2:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z5saxpyifPfS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z5saxpyifPfS_, .Lfunc_end0-_Z5saxpyifPfS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z5saxpyifPfS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z5saxpyifPfS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include<stdio.h>
#include<iostream>
//using the struct of cudaDeviceProp getting the information of gpu
void printDeviceProp(hipDeviceProp_t devProp){
printf("Name: %s\n", devProp.name);
printf("Maximum thread per block: %d\n", devProp.maxThreadsPerBlock);
for(int i = 0; i < 3; i++)
printf("Maximum dimension of block: %d\t %d\n",i, devProp.maxThreadsDim[i]);
return;
}
//A single thread for each of the n elements, and each thread computes its array index using blockIdx.x*blockDim.x + threadIdx.x.
__global__ void saxpy(int n, float a, float *x, float *y){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n){
y[i] = a*x[i] + y[i];
__syncthreads();//syncronizing threads in kernel calls
}
}
int main(int argc, char* argv[]){
int devCount;
int N;
float A;
float* d_x;
float* d_y;
//N = atoi(argv[1]);
//A = atoi(argv[2]);
hipGetDeviceCount(&devCount);
printf("%d Cuda devices\n", devCount);
for(int k = 0; k < devCount; ++k){
printf("\nCuda Device %d\n",k);
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, k);
printDeviceProp(devProp);
}
printf("Size of array N: ");
scanf("%d", &N);
printf("Size of scalar value A: ");
scanf("%f", &A);
//Allocates space from host memory
float* h_x = (float*)malloc(N*sizeof(float));
float* h_y = (float*)malloc(N*sizeof(float));
hipMalloc((void**)&d_x, N*sizeof(float));
hipMalloc((void**)&d_y, N*sizeof(float));
for(int i=0; i < N; i++){
//Generates random values between 0 and 256 and assigns to allocated space
h_x[i] = (float) (rand() % 256);
h_y[i] = (float) (rand() % 256);
printf("x = %f\n",h_x[i]);
printf("y = %f\n",h_y[i]);
}
//Sends from host to device
hipMemcpy(d_x, h_x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, h_y, N*sizeof(float), hipMemcpyHostToDevice);
int blocks = (N + 255)/256;
printf("Block number: %d\n",blocks);
//calls the kernel function
//The first argument in the execution specifies the number of thread blocks in the grid, and the second specifies the number of threads in a thread block.
saxpy<<<blocks,256>>>(N, A, d_x, d_y);
//returns the value from device to host
hipMemcpy(h_y, d_y, N*sizeof(float), hipMemcpyDeviceToHost);
//Printing the host value after returned from device to host
for(int i=0; i < N; i++){
printf("y = %f\n",h_y[i]);
}
hipFree(d_x);
hipFree(d_y);
free(h_x);
free(h_y);
return 0;
} | .text
.file "saxpy.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z15printDeviceProp20hipDeviceProp_tR0600 # -- Begin function _Z15printDeviceProp20hipDeviceProp_tR0600
.p2align 4, 0x90
.type _Z15printDeviceProp20hipDeviceProp_tR0600,@function
_Z15printDeviceProp20hipDeviceProp_tR0600: # @_Z15printDeviceProp20hipDeviceProp_tR0600
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
pushq %rax
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
leaq 32(%rsp), %rbx
movl $.L.str, %edi
movq %rbx, %rsi
xorl %eax, %eax
callq printf
movl 352(%rsp), %esi
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB0_1: # =>This Inner Loop Header: Depth=1
movl 324(%rbx,%r14,4), %edx
movl $.L.str.2, %edi
movl %r14d, %esi
xorl %eax, %eax
callq printf
incq %r14
cmpq $3, %r14
jne .LBB0_1
# %bb.2:
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z15printDeviceProp20hipDeviceProp_tR0600, .Lfunc_end0-_Z15printDeviceProp20hipDeviceProp_tR0600
.cfi_endproc
# -- End function
.globl _Z20__device_stub__saxpyifPfS_ # -- Begin function _Z20__device_stub__saxpyifPfS_
.p2align 4, 0x90
.type _Z20__device_stub__saxpyifPfS_,@function
_Z20__device_stub__saxpyifPfS_: # @_Z20__device_stub__saxpyifPfS_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movss %xmm0, 8(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z5saxpyifPfS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z20__device_stub__saxpyifPfS_, .Lfunc_end1-_Z20__device_stub__saxpyifPfS_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $3048, %rsp # imm = 0xBE8
.cfi_def_cfa_offset 3088
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
leaq 12(%rsp), %rdi
callq hipGetDeviceCount
movl 12(%rsp), %esi
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
cmpl $0, 12(%rsp)
jle .LBB2_5
# %bb.1: # %.lr.ph
leaq 1576(%rsp), %rbx
leaq 96(%rsp), %r14
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB2_2: # =>This Loop Header: Depth=1
# Child Loop BB2_3 Depth 2
movl $.L.str.4, %edi
movl %ebp, %esi
xorl %eax, %eax
callq printf
movq %rbx, %rdi
movl %ebp, %esi
callq hipGetDevicePropertiesR0600
movl $1472, %edx # imm = 0x5C0
movq %r14, %rdi
movq %rbx, %rsi
callq memcpy@PLT
movl $.L.str, %edi
movq %r14, %rsi
xorl %eax, %eax
callq printf
movl 416(%rsp), %esi
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB2_3: # Parent Loop BB2_2 Depth=1
# => This Inner Loop Header: Depth=2
movl 420(%rsp,%r15,4), %edx
movl $.L.str.2, %edi
movl %r15d, %esi
xorl %eax, %eax
callq printf
incq %r15
cmpq $3, %r15
jne .LBB2_3
# %bb.4: # %_Z15printDeviceProp20hipDeviceProp_tR0600.exit
# in Loop: Header=BB2_2 Depth=1
incl %ebp
cmpl 12(%rsp), %ebp
jl .LBB2_2
.LBB2_5: # %._crit_edge
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
leaq 8(%rsp), %rsi
movl $.L.str.6, %edi
xorl %eax, %eax
callq __isoc23_scanf
movl $.L.str.7, %edi
xorl %eax, %eax
callq printf
leaq 36(%rsp), %rsi
movl $.L.str.8, %edi
xorl %eax, %eax
callq __isoc23_scanf
movslq 8(%rsp), %r15
shlq $2, %r15
movq %r15, %rdi
callq malloc
movq %rax, %rbx
movq %r15, %rdi
callq malloc
movq %rax, %r14
leaq 24(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
movslq 8(%rsp), %rsi
shlq $2, %rsi
leaq 16(%rsp), %rdi
callq hipMalloc
movl 8(%rsp), %eax
testl %eax, %eax
jle .LBB2_8
# %bb.6: # %.lr.ph35.preheader
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB2_7: # %.lr.ph35
# =>This Inner Loop Header: Depth=1
callq rand
# kill: def $eax killed $eax def $rax
leal 255(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
andl $-256, %ecx
subl %ecx, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%rbx,%r15,4)
callq rand
# kill: def $eax killed $eax def $rax
leal 255(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
andl $-256, %ecx
subl %ecx, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%r14,%r15,4)
movss (%rbx,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.9, %edi
movb $1, %al
callq printf
movss (%r14,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.10, %edi
movb $1, %al
callq printf
incq %r15
movslq 8(%rsp), %rax
cmpq %rax, %r15
jl .LBB2_7
.LBB2_8: # %._crit_edge36
movq 24(%rsp), %rdi
movslq %eax, %rdx
shlq $2, %rdx
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
movslq 8(%rsp), %rdx
shlq $2, %rdx
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movl 8(%rsp), %r15d
leal 255(%r15), %eax
addl $510, %r15d # imm = 0x1FE
testl %eax, %eax
cmovnsl %eax, %r15d
sarl $8, %r15d
movl $.L.str.11, %edi
movl %r15d, %esi
xorl %eax, %eax
callq printf
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %r15
orq $256, %rdx # imm = 0x100
movq %r15, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_10
# %bb.9:
movl 8(%rsp), %eax
movss 36(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movl %eax, 44(%rsp)
movss %xmm0, 40(%rsp)
movq %rcx, 88(%rsp)
movq %rdx, 80(%rsp)
leaq 44(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rax
movq %rax, 104(%rsp)
leaq 88(%rsp), %rax
movq %rax, 112(%rsp)
leaq 80(%rsp), %rax
movq %rax, 120(%rsp)
leaq 1576(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 56(%rsp), %rdx
leaq 48(%rsp), %rcx
callq __hipPopCallConfiguration
movq 1576(%rsp), %rsi
movl 1584(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z5saxpyifPfS_, %edi
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_10:
movq 16(%rsp), %rsi
movslq 8(%rsp), %rdx
shlq $2, %rdx
movq %r14, %rdi
movl $2, %ecx
callq hipMemcpy
cmpl $0, 8(%rsp)
jle .LBB2_13
# %bb.11: # %.lr.ph39.preheader
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB2_12: # %.lr.ph39
# =>This Inner Loop Header: Depth=1
movss (%r14,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.10, %edi
movb $1, %al
callq printf
incq %r15
movslq 8(%rsp), %rax
cmpq %rax, %r15
jl .LBB2_12
.LBB2_13: # %._crit_edge40
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
xorl %eax, %eax
addq $3048, %rsp # imm = 0xBE8
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z5saxpyifPfS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Name: %s\n"
.size .L.str, 10
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Maximum thread per block: %d\n"
.size .L.str.1, 30
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Maximum dimension of block: %d\t %d\n"
.size .L.str.2, 36
.type _Z5saxpyifPfS_,@object # @_Z5saxpyifPfS_
.section .rodata,"a",@progbits
.globl _Z5saxpyifPfS_
.p2align 3, 0x0
_Z5saxpyifPfS_:
.quad _Z20__device_stub__saxpyifPfS_
.size _Z5saxpyifPfS_, 8
.type .L.str.3,@object # @.str.3
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.3:
.asciz "%d Cuda devices\n"
.size .L.str.3, 17
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "\nCuda Device %d\n"
.size .L.str.4, 17
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Size of array N: "
.size .L.str.5, 18
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "%d"
.size .L.str.6, 3
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "Size of scalar value A: "
.size .L.str.7, 25
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "%f"
.size .L.str.8, 3
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "x = %f\n"
.size .L.str.9, 8
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "y = %f\n"
.size .L.str.10, 8
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "Block number: %d\n"
.size .L.str.11, 18
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z5saxpyifPfS_"
.size .L__unnamed_1, 15
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z20__device_stub__saxpyifPfS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z5saxpyifPfS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z5saxpyifPfS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x160], PT ; /* 0x0000580004007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x000fc800078e0205 */
/*0090*/ IMAD.WIDE R4, R4, R5, c[0x0][0x170] ; /* 0x00005c0004047625 */
/* 0x000fe400078e0205 */
/*00a0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R7, [R4.64] ; /* 0x0000000404077981 */
/* 0x000ea4000c1e1900 */
/*00c0*/ FFMA R7, R2, c[0x0][0x164], R7 ; /* 0x0000590002077a23 */
/* 0x004fca0000000007 */
/*00d0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe8000c101904 */
/*00e0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
	.text
	.amdgcn_target	"amdgcn-amd-amdhsa--gfx1100"
	.protected	_Z5saxpyifPfS_
	.globl	_Z5saxpyifPfS_
	.p2align	8
	.type	_Z5saxpyifPfS_,@function
; saxpy(int n, float a, float* x, float* y) for gfx1100 (wave32):
; for each global thread id i < n, the second array gets a*x[i] + y[i].
; Kernarg layout (matches the metadata below): +0x00 n, +0x04 a,
; +0x08 first pointer, +0x10 second (stored-to) pointer; +0x24 is the
; hidden group-size-x used to form the flat thread id.
_Z5saxpyifPfS_:
	s_clause 0x1
	s_load_b32 s2, s[0:1], 0x24             ; hidden group-size-x (16-bit field)
	s_load_b32 s3, s[0:1], 0x0              ; n
	s_waitcnt lgkmcnt(0)
	s_and_b32 s2, s2, 0xffff
	s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
	v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] ; i = workgroup_id_x * group_size + tid_x
	s_mov_b32 s2, exec_lo
	v_cmpx_gt_i32_e64 s3, v1                ; lanes with i >= n leave the exec mask
	s_cbranch_execz .LBB0_2
	s_load_b128 s[4:7], s[0:1], 0x8         ; both array base pointers
	v_ashrrev_i32_e32 v2, 31, v1            ; sign-extend i for the 64-bit shift
	s_load_b32 s0, s[0:1], 0x4              ; a
	s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
	v_lshlrev_b64 v[0:1], 2, v[1:2]         ; byte offset = i * sizeof(float)
	s_waitcnt lgkmcnt(0)
	v_add_co_u32 v2, vcc_lo, s4, v0         ; &x[i]
	s_delay_alu instid0(VALU_DEP_2)
	v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
	v_add_co_u32 v0, vcc_lo, s6, v0         ; address in the second array
	v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
	global_load_b32 v2, v[2:3], off         ; x[i]
	global_load_b32 v3, v[0:1], off         ; current value at the destination
	s_waitcnt vmcnt(0)
	v_fmac_f32_e32 v3, s0, v2               ; v3 = a * x[i] + v3
	global_store_b32 v[0:1], v3, off
	s_waitcnt_vscnt null, 0x0
	s_barrier                               ; workgroup barrier (source had __syncthreads)
	buffer_gl0_inv
.LBB0_2:
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z5saxpyifPfS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z5saxpyifPfS_, .Lfunc_end0-_Z5saxpyifPfS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z5saxpyifPfS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z5saxpyifPfS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00077f13_00000000-6_saxpy.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
# atexit handler installed by __sti____cudaRegisterAll: unregisters the
# module's fat binary handle with the CUDA runtime at process exit.
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3673:
	.cfi_startproc
	endbr64
	subq	$8, %rsp                        # re-align the stack for the call
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE3673:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Name: %s\n"
.LC1:
.string "Maximum thread per block: %d\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "Maximum dimension of block: %d\t %d\n"
.text
	.globl	_Z15printDeviceProp14cudaDeviceProp
	.type	_Z15printDeviceProp14cudaDeviceProp, @function
# printDeviceProp(cudaDeviceProp) -- the struct is passed by value on the
# caller's stack.  Prints the device name (string at the start of the
# struct), the max-threads-per-block field, and three max-block-dimension
# entries (loop index 0..2), using format strings .LC0/.LC1/.LC2.
_Z15printDeviceProp14cudaDeviceProp:
.LFB3669:
	.cfi_startproc
	endbr64
	pushq	%rbp
	.cfi_def_cfa_offset 16
	.cfi_offset 6, -16
	pushq	%rbx
	.cfi_def_cfa_offset 24
	.cfi_offset 3, -24
	subq	$8, %rsp
	.cfi_def_cfa_offset 32
	leaq	32(%rsp), %rdx                  # by-value struct starts at 32(%rsp); field 0 = name
	leaq	.LC0(%rip), %rsi                # "Name: %s\n"
	movl	$2, %edi                        # __printf_chk flag argument
	movl	$0, %eax                        # no vector registers in the varargs
	call	__printf_chk@PLT
	movl	352(%rsp), %edx                 # int field at struct offset 320 (thread limit)
	leaq	.LC1(%rip), %rsi                # "Maximum thread per block: %d\n"
	movl	$2, %edi
	movl	$0, %eax
	call	__printf_chk@PLT
	movl	$0, %ebx                        # dimension index i = 0..2
	leaq	.LC2(%rip), %rbp                # "Maximum dimension of block: %d\t %d\n"
.L4:
	movl	356(%rsp,%rbx,4), %ecx          # dimension value (struct offsets 324 + 4*i)
	movl	%ebx, %edx
	movq	%rbp, %rsi
	movl	$2, %edi
	movl	$0, %eax
	call	__printf_chk@PLT
	addq	$1, %rbx
	cmpq	$3, %rbx
	jne	.L4
	addq	$8, %rsp
	.cfi_def_cfa_offset 24
	popq	%rbx
	.cfi_def_cfa_offset 16
	popq	%rbp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE3669:
	.size	_Z15printDeviceProp14cudaDeviceProp, .-_Z15printDeviceProp14cudaDeviceProp
.globl _Z28__device_stub__Z5saxpyifPfS_ifPfS_
.type _Z28__device_stub__Z5saxpyifPfS_ifPfS_, @function
_Z28__device_stub__Z5saxpyifPfS_ifPfS_:
.LFB3695:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movss %xmm0, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z5saxpyifPfS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3695:
.size _Z28__device_stub__Z5saxpyifPfS_ifPfS_, .-_Z28__device_stub__Z5saxpyifPfS_ifPfS_
	.globl	_Z5saxpyifPfS_
	.type	_Z5saxpyifPfS_, @function
# Host-side entry for saxpy<<<...>>>: forwards its register arguments
# unchanged to the device stub, which pops the launch configuration and
# performs the actual cudaLaunchKernel call.
_Z5saxpyifPfS_:
.LFB3696:
	.cfi_startproc
	endbr64
	subq	$8, %rsp                        # keep 16-byte stack alignment
	.cfi_def_cfa_offset 16
	call	_Z28__device_stub__Z5saxpyifPfS_ifPfS_
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE3696:
	.size	_Z5saxpyifPfS_, .-_Z5saxpyifPfS_
.section .rodata.str1.1
.LC3:
.string "%d Cuda devices\n"
.LC4:
.string "\nCuda Device %d\n"
.LC5:
.string "Size of array N: "
.LC6:
.string "%d"
.LC7:
.string "Size of scalar value A: "
.LC8:
.string "%f"
.LC9:
.string "x = %f\n"
.LC10:
.string "y = %f\n"
.LC11:
.string "Block number: %d\n"
.text
.globl main
.type main, @function
main:
.LFB3670:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $1104, %rsp
.cfi_def_cfa_offset 1152
movq %fs:40, %rax
movq %rax, 1096(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rdi
call cudaGetDeviceCount@PLT
movl 12(%rsp), %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, 12(%rsp)
jle .L16
movl $0, %ebx
leaq .LC4(%rip), %r12
.L17:
movl %ebx, %edx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 64(%rsp), %rbp
movl %ebx, %esi
movq %rbp, %rdi
call cudaGetDeviceProperties_v2@PLT
subq $1040, %rsp
.cfi_def_cfa_offset 2192
movl $129, %ecx
movq %rsp, %rdi
movq %rbp, %rsi
rep movsq
call _Z15printDeviceProp14cudaDeviceProp
addl $1, %ebx
addq $1040, %rsp
.cfi_def_cfa_offset 1152
cmpl %ebx, 12(%rsp)
jg .L17
.L16:
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 16(%rsp), %rsi
leaq .LC6(%rip), %rdi
movl $0, %eax
call __isoc23_scanf@PLT
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 20(%rsp), %rsi
leaq .LC8(%rip), %rdi
movl $0, %eax
call __isoc23_scanf@PLT
movslq 16(%rsp), %rbx
salq $2, %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, %r12
movq %rbx, %rdi
call malloc@PLT
movq %rax, %rbp
leaq 24(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movslq 16(%rsp), %rsi
salq $2, %rsi
leaq 32(%rsp), %rdi
call cudaMalloc@PLT
movl 16(%rsp), %edx
testl %edx, %edx
jle .L18
movl $0, %ebx
leaq .LC9(%rip), %r14
leaq .LC10(%rip), %r13
.L19:
call rand@PLT
cltd
shrl $24, %edx
addl %edx, %eax
movzbl %al, %eax
subl %edx, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, (%r12,%rbx,4)
call rand@PLT
cltd
shrl $24, %edx
addl %edx, %eax
movzbl %al, %eax
subl %edx, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, 0(%rbp,%rbx,4)
pxor %xmm0, %xmm0
cvtss2sd (%r12,%rbx,4), %xmm0
movq %r14, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
pxor %xmm0, %xmm0
cvtss2sd 0(%rbp,%rbx,4), %xmm0
movq %r13, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl 16(%rsp), %edx
addq $1, %rbx
cmpl %ebx, %edx
jg .L19
.L18:
movslq %edx, %rdx
salq $2, %rdx
movl $1, %ecx
movq %r12, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movslq 16(%rsp), %rdx
salq $2, %rdx
movl $1, %ecx
movq %rbp, %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
movl 16(%rsp), %eax
leal 510(%rax), %ebx
addl $255, %eax
cmovns %eax, %ebx
sarl $8, %ebx
movl %ebx, %edx
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $256, 52(%rsp)
movl $1, 56(%rsp)
movl %ebx, 40(%rsp)
movl $1, 44(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 52(%rsp), %rdx
movl $1, %ecx
movq 40(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L28
.L20:
movslq 16(%rsp), %rdx
salq $2, %rdx
movl $2, %ecx
movq 32(%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
cmpl $0, 16(%rsp)
jle .L21
movl $0, %ebx
leaq .LC10(%rip), %r13
.L22:
pxor %xmm0, %xmm0
cvtss2sd 0(%rbp,%rbx,4), %xmm0
movq %r13, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $1, %rbx
cmpl %ebx, 16(%rsp)
jg .L22
.L21:
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq %r12, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq 1096(%rsp), %rax
subq %fs:40, %rax
jne .L29
movl $0, %eax
addq $1104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L28:
.cfi_restore_state
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movss 20(%rsp), %xmm0
movl 16(%rsp), %edi
call _Z28__device_stub__Z5saxpyifPfS_ifPfS_
jmp .L20
.L29:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3670:
.size main, .-main
.section .rodata.str1.1
.LC12:
.string "_Z5saxpyifPfS_"
.text
	.type	_ZL24__sti____cudaRegisterAllv, @function
# Module initializer (run from .init_array).  Registers the embedded fat
# binary with the CUDA runtime, registers the saxpy kernel under its mangled
# name ".LC12", finalizes registration, and schedules the unregister helper
# via atexit.
_ZL24__sti____cudaRegisterAllv:
.LFB3698:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	leaq	_ZL15__fatDeviceText(%rip), %rdi
	call	__cudaRegisterFatBinary@PLT
	movq	%rax, %rdi
	movq	%rax, _ZL20__cudaFatCubinHandle(%rip) # cache handle for later unregister
	pushq	$0                              # four trailing null pointer arguments
	.cfi_def_cfa_offset 24
	pushq	$0
	.cfi_def_cfa_offset 32
	pushq	$0
	.cfi_def_cfa_offset 40
	pushq	$0
	.cfi_def_cfa_offset 48
	movl	$0, %r9d
	movl	$-1, %r8d
	leaq	.LC12(%rip), %rdx               # "_Z5saxpyifPfS_" (device function name)
	movq	%rdx, %rcx
	leaq	_Z5saxpyifPfS_(%rip), %rsi      # host function used as the kernel key
	call	__cudaRegisterFunction@PLT
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaRegisterFatBinaryEnd@PLT
	leaq	_ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
	call	atexit@PLT                      # unregister at process exit
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE3698:
	.size	_ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "saxpy.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z15printDeviceProp20hipDeviceProp_tR0600 # -- Begin function _Z15printDeviceProp20hipDeviceProp_tR0600
.p2align 4, 0x90
.type _Z15printDeviceProp20hipDeviceProp_tR0600,@function
_Z15printDeviceProp20hipDeviceProp_tR0600: # @_Z15printDeviceProp20hipDeviceProp_tR0600
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
pushq %rax
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
leaq 32(%rsp), %rbx
movl $.L.str, %edi
movq %rbx, %rsi
xorl %eax, %eax
callq printf
movl 352(%rsp), %esi
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB0_1: # =>This Inner Loop Header: Depth=1
movl 324(%rbx,%r14,4), %edx
movl $.L.str.2, %edi
movl %r14d, %esi
xorl %eax, %eax
callq printf
incq %r14
cmpq $3, %r14
jne .LBB0_1
# %bb.2:
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z15printDeviceProp20hipDeviceProp_tR0600, .Lfunc_end0-_Z15printDeviceProp20hipDeviceProp_tR0600
.cfi_endproc
# -- End function
.globl _Z20__device_stub__saxpyifPfS_ # -- Begin function _Z20__device_stub__saxpyifPfS_
.p2align 4, 0x90
.type _Z20__device_stub__saxpyifPfS_,@function
_Z20__device_stub__saxpyifPfS_: # @_Z20__device_stub__saxpyifPfS_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movss %xmm0, 8(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z5saxpyifPfS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z20__device_stub__saxpyifPfS_, .Lfunc_end1-_Z20__device_stub__saxpyifPfS_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $3048, %rsp # imm = 0xBE8
.cfi_def_cfa_offset 3088
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
leaq 12(%rsp), %rdi
callq hipGetDeviceCount
movl 12(%rsp), %esi
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
cmpl $0, 12(%rsp)
jle .LBB2_5
# %bb.1: # %.lr.ph
leaq 1576(%rsp), %rbx
leaq 96(%rsp), %r14
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB2_2: # =>This Loop Header: Depth=1
# Child Loop BB2_3 Depth 2
movl $.L.str.4, %edi
movl %ebp, %esi
xorl %eax, %eax
callq printf
movq %rbx, %rdi
movl %ebp, %esi
callq hipGetDevicePropertiesR0600
movl $1472, %edx # imm = 0x5C0
movq %r14, %rdi
movq %rbx, %rsi
callq memcpy@PLT
movl $.L.str, %edi
movq %r14, %rsi
xorl %eax, %eax
callq printf
movl 416(%rsp), %esi
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB2_3: # Parent Loop BB2_2 Depth=1
# => This Inner Loop Header: Depth=2
movl 420(%rsp,%r15,4), %edx
movl $.L.str.2, %edi
movl %r15d, %esi
xorl %eax, %eax
callq printf
incq %r15
cmpq $3, %r15
jne .LBB2_3
# %bb.4: # %_Z15printDeviceProp20hipDeviceProp_tR0600.exit
# in Loop: Header=BB2_2 Depth=1
incl %ebp
cmpl 12(%rsp), %ebp
jl .LBB2_2
.LBB2_5: # %._crit_edge
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
leaq 8(%rsp), %rsi
movl $.L.str.6, %edi
xorl %eax, %eax
callq __isoc23_scanf
movl $.L.str.7, %edi
xorl %eax, %eax
callq printf
leaq 36(%rsp), %rsi
movl $.L.str.8, %edi
xorl %eax, %eax
callq __isoc23_scanf
movslq 8(%rsp), %r15
shlq $2, %r15
movq %r15, %rdi
callq malloc
movq %rax, %rbx
movq %r15, %rdi
callq malloc
movq %rax, %r14
leaq 24(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
movslq 8(%rsp), %rsi
shlq $2, %rsi
leaq 16(%rsp), %rdi
callq hipMalloc
movl 8(%rsp), %eax
testl %eax, %eax
jle .LBB2_8
# %bb.6: # %.lr.ph35.preheader
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB2_7: # %.lr.ph35
# =>This Inner Loop Header: Depth=1
callq rand
# kill: def $eax killed $eax def $rax
leal 255(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
andl $-256, %ecx
subl %ecx, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%rbx,%r15,4)
callq rand
# kill: def $eax killed $eax def $rax
leal 255(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
andl $-256, %ecx
subl %ecx, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%r14,%r15,4)
movss (%rbx,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.9, %edi
movb $1, %al
callq printf
movss (%r14,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.10, %edi
movb $1, %al
callq printf
incq %r15
movslq 8(%rsp), %rax
cmpq %rax, %r15
jl .LBB2_7
.LBB2_8: # %._crit_edge36
movq 24(%rsp), %rdi
movslq %eax, %rdx
shlq $2, %rdx
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
movslq 8(%rsp), %rdx
shlq $2, %rdx
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movl 8(%rsp), %r15d
leal 255(%r15), %eax
addl $510, %r15d # imm = 0x1FE
testl %eax, %eax
cmovnsl %eax, %r15d
sarl $8, %r15d
movl $.L.str.11, %edi
movl %r15d, %esi
xorl %eax, %eax
callq printf
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %r15
orq $256, %rdx # imm = 0x100
movq %r15, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_10
# %bb.9:
movl 8(%rsp), %eax
movss 36(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movl %eax, 44(%rsp)
movss %xmm0, 40(%rsp)
movq %rcx, 88(%rsp)
movq %rdx, 80(%rsp)
leaq 44(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rax
movq %rax, 104(%rsp)
leaq 88(%rsp), %rax
movq %rax, 112(%rsp)
leaq 80(%rsp), %rax
movq %rax, 120(%rsp)
leaq 1576(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 56(%rsp), %rdx
leaq 48(%rsp), %rcx
callq __hipPopCallConfiguration
movq 1576(%rsp), %rsi
movl 1584(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z5saxpyifPfS_, %edi
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_10:
movq 16(%rsp), %rsi
movslq 8(%rsp), %rdx
shlq $2, %rdx
movq %r14, %rdi
movl $2, %ecx
callq hipMemcpy
cmpl $0, 8(%rsp)
jle .LBB2_13
# %bb.11: # %.lr.ph39.preheader
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB2_12: # %.lr.ph39
# =>This Inner Loop Header: Depth=1
movss (%r14,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.10, %edi
movb $1, %al
callq printf
incq %r15
movslq 8(%rsp), %rax
cmpq %rax, %r15
jl .LBB2_12
.LBB2_13: # %._crit_edge40
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
xorl %eax, %eax
addq $3048, %rsp # imm = 0xBE8
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z5saxpyifPfS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Name: %s\n"
.size .L.str, 10
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Maximum thread per block: %d\n"
.size .L.str.1, 30
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Maximum dimension of block: %d\t %d\n"
.size .L.str.2, 36
.type _Z5saxpyifPfS_,@object # @_Z5saxpyifPfS_
.section .rodata,"a",@progbits
.globl _Z5saxpyifPfS_
.p2align 3, 0x0
_Z5saxpyifPfS_:
.quad _Z20__device_stub__saxpyifPfS_
.size _Z5saxpyifPfS_, 8
.type .L.str.3,@object # @.str.3
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.3:
.asciz "%d Cuda devices\n"
.size .L.str.3, 17
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "\nCuda Device %d\n"
.size .L.str.4, 17
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Size of array N: "
.size .L.str.5, 18
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "%d"
.size .L.str.6, 3
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "Size of scalar value A: "
.size .L.str.7, 25
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "%f"
.size .L.str.8, 3
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "x = %f\n"
.size .L.str.9, 8
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "y = %f\n"
.size .L.str.10, 8
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "Block number: %d\n"
.size .L.str.11, 18
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z5saxpyifPfS_"
.size .L__unnamed_1, 15
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z20__device_stub__saxpyifPfS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z5saxpyifPfS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
// method to read the first, second and fourth input files
// Reads input files 1, 2, and 4: the first integer is the element count,
// followed by that many whitespace-separated integers.
//
// On success, *input points to a heap array (caller frees) and the element
// count is returned.  If the file cannot be opened or the header cannot be
// parsed, a diagnostic goes to stderr and fprintf's return value is
// propagated, as in the original contract.
//
// Fixes vs. the original: fscanf and malloc results are now checked (a
// malformed header or allocation failure previously left file_length or the
// array contents as uninitialized garbage), the array is zero-initialized
// via calloc so a short file cannot leave indeterminate elements, and
// reading stops cleanly at the first non-integer token.
int read_input_one_two_four(int** input, char* filepath) {
    FILE* fp = fopen(filepath, "r");
    if (!fp) return fprintf(stderr, "Couldn't open file for reading\n");
    int file_length = 0;
    if (fscanf(fp, "%d", &file_length) != 1 || file_length < 0) {
        fclose(fp);
        return fprintf(stderr, "Couldn't read element count\n");
    }
    // calloc: zero-fill so elements missing from a short file are defined.
    *input = (int*) calloc((size_t) file_length, sizeof(int));
    if (!*input) {
        fclose(fp);
        return fprintf(stderr, "Out of memory\n");
    }
    for (int i = 0; i < file_length; i++) {
        int next_int;
        if (fscanf(fp, "%d", &next_int) != 1) break;  // short / malformed file
        (*input)[i] = next_int;
    }
    fclose(fp);
    return file_length;
}
// method to read the third input file
// Reads input file 3: the first integer is the row count, followed by that
// many lines of four comma-separated integers, scattered column-wise into
// four parallel heap arrays (caller frees each).
//
// Returns the row count on success.  If the file cannot be opened or the
// header cannot be parsed, a diagnostic goes to stderr and fprintf's return
// value is propagated, as in the original contract.
//
// Fixes vs. the original: fscanf and calloc results are now checked (a
// malformed header or allocation failure previously left file_length or the
// arrays as uninitialized garbage), allocations are zero-filled so a short
// file cannot leave indeterminate elements, and any partially performed
// allocations are released on failure instead of leaking.
int read_input_three(int** input1, int** input2, int** input3, int** input4, char* filepath) {
    FILE* fp = fopen(filepath, "r");
    if (!fp) return fprintf(stderr, "Couldn't open file for reading\n");
    int file_length = 0;
    if (fscanf(fp, "%d", &file_length) != 1 || file_length < 0) {
        fclose(fp);
        return fprintf(stderr, "Couldn't read element count\n");
    }
    // calloc: zero-fill so rows missing from a short file are defined.
    *input1 = (int*) calloc((size_t) file_length, sizeof(int));
    *input2 = (int*) calloc((size_t) file_length, sizeof(int));
    *input3 = (int*) calloc((size_t) file_length, sizeof(int));
    *input4 = (int*) calloc((size_t) file_length, sizeof(int));
    if (!*input1 || !*input2 || !*input3 || !*input4) {
        free(*input1); free(*input2); free(*input3); free(*input4);
        *input1 = *input2 = *input3 = *input4 = NULL;
        fclose(fp);
        return fprintf(stderr, "Out of memory\n");
    }
    for (int i = 0; i < file_length; i++) {
        int next_int1, next_int2, next_int3, next_int4;
        if (fscanf(fp, "%d, %d, %d, %d",
                   &next_int1, &next_int2, &next_int3, &next_int4) != 4) {
            break;  // short / malformed file
        }
        (*input1)[i] = next_int1;
        (*input2)[i] = next_int2;
        (*input3)[i] = next_int3;
        (*input4)[i] = next_int4;
    }
    fclose(fp);
    return file_length;
}
// Device-global work queue shared by all blocks: node indices discovered for
// the next BFS level are appended here via an atomicAdd-reserved slot.
__device__ int globalQueue[7000000];
// Count of valid entries in globalQueue (the next free slot).
__device__ int numNextLevelNodes = 0;

// Processes one BFS level of the gate network using a single global queue.
//
// Each thread handles a contiguous chunk of the current level's nodes.  For
// every neighbor not yet visited (claimed atomically via atomicExch on
// nodeVisited), it evaluates the neighbor's gate from the neighbor's input
// value and the current node's output value, stores the result in
// nodeOutput[neighbor], and appends the neighbor to globalQueue.
//
// Fixes vs. the original:
//  * nodesPerThread now uses ceiling division.  With floor division the
//    trailing numNodes % totalThreads nodes were never processed, and the
//    kernel silently did nothing at all when numNodes < totalThreads.
//  * The __syncthreads() that sat at the bottom of the outer loop was
//    removed: the loop trip count differs between threads of a block, so
//    the barrier was divergent (undefined behavior), and the kernel uses no
//    shared memory that would require a barrier.
__global__ void global_queuing_kernel(int totalThreads, int numNodes, int* nodePtrs, int* currLevelNodes, int* nodeNeighbors, int* nodeVisited, int* nodeGate, int* nodeInput, int* nodeOutput) {
    // Ceiling division so every node is covered even when totalThreads does
    // not divide numNodes evenly.
    int nodesPerThread = (numNodes + totalThreads - 1) / totalThreads;
    int threadIndex = threadIdx.x + (blockDim.x * blockIdx.x);
    int beginIndex = threadIndex * nodesPerThread;
    // Loop over this thread's slice of the current level's nodes.
    for (int index = beginIndex; index < numNodes && index < beginIndex + nodesPerThread; index++) {
        int nodeIndex = currLevelNodes[index];
        // Loop over all neighbors of the node (CSR adjacency via nodePtrs).
        for (int secondIndex = nodePtrs[nodeIndex]; secondIndex < nodePtrs[nodeIndex + 1]; secondIndex++) {
            int neighborIndex = nodeNeighbors[secondIndex];
            // Atomically claim the neighbor; only the first visitor proceeds.
            const int alreadyVisited = atomicExch(&(nodeVisited[neighborIndex]), 1);
            if (!alreadyVisited) {
                int result = 0;
                int nInputV = nodeInput[neighborIndex];
                // NOTE(review): the gate is fed the *current* node's output,
                // not the neighbor's -- preserved from the original; confirm
                // against the assignment spec.
                int nOutputV = nodeOutput[nodeIndex];
                int nGateV = nodeGate[neighborIndex];
                // Gate truth tables for 0/1 operands, encoded 0..5.
                switch (nGateV) {
                    case 0:  // AND
                        result = (nInputV == 1 && nOutputV == 1) ? 1 : 0;
                        break;
                    case 1:  // OR
                        result = (nInputV == 0 && nOutputV == 0) ? 0 : 1;
                        break;
                    case 2:  // NAND
                        result = (nInputV == 1 && nOutputV == 1) ? 0 : 1;
                        break;
                    case 3:  // NOR
                        result = (nInputV == 0 && nOutputV == 0) ? 1 : 0;
                        break;
                    case 4:  // XOR
                        result = (nInputV == nOutputV) ? 0 : 1;
                        break;
                    case 5:  // XNOR
                        result = (nInputV == nOutputV) ? 1 : 0;
                        break;
                }
                // Publish the neighbor's new output value.
                nodeOutput[neighborIndex] = result;
                // Reserve a unique slot, then enqueue for the next level.
                int globalQueueIndex = atomicAdd(&numNextLevelNodes, 1);
                globalQueue[globalQueueIndex] = neighborIndex;
            }
        }
    }
}
// Logs a diagnostic for any non-success CUDA status and returns the status
// unchanged, so the call can wrap a CUDA API invocation inline.
inline cudaError_t checkCudaErr(cudaError_t err, const char* msg) {
    if (err == cudaSuccess) {
        return err;
    }
    fprintf(stderr, "Error at runtime %s: %s\n", msg, cudaGetErrorString(err));
    return err;
}
int main(int argc, char *argv[]){
if (argc < 7) {
return fprintf(stderr, "Missing input argument(s)!\n");
}
// Variables
int numNodePtrs;
int numNodes;
int *nodePtrs_h;
int *nodeNeighbors_h;
int *nodeVisited_h;
int numTotalNeighbors_h;
int *currLevelNodes_h;
int numCurrLevelNodes;
int numNextLevelNodes_h;
int *nodeGate_h;
int *nodeInput_h;
int *nodeOutput_h;
numNodePtrs = read_input_one_two_four(&nodePtrs_h, argv[1]);
numTotalNeighbors_h = read_input_one_two_four(&nodeNeighbors_h, argv[2]);
numNodes = read_input_three(&nodeVisited_h, &nodeGate_h, &nodeInput_h, &nodeOutput_h,argv[3]);
numCurrLevelNodes = read_input_one_two_four(&currLevelNodes_h, argv[4]);
char* nodeOutput_fileName = argv[5];
char* nextLevelNodes_fileName = argv[6];
// output
int *nextLevelNodes_h = (int *)malloc(numNodes*sizeof(int));
checkCudaErr(cudaMemcpyToSymbol(globalQueue,nextLevelNodes_h, numNodes * sizeof(int)), "Copying");
int numNodesSize = numNodes * sizeof(int);
int numBlocks = 35;
int blockSize = 128;
// Cuda variables
int* nodePtrs_cuda = (int*)malloc( numNodePtrs * sizeof(int)) ;
cudaMalloc (&nodePtrs_cuda, numNodePtrs * sizeof(int));
cudaMemcpy(nodePtrs_cuda, nodePtrs_h, numNodePtrs * sizeof(int), cudaMemcpyHostToDevice);
int* currLevelNodes_cuda = (int*)malloc( numCurrLevelNodes * sizeof(int)) ;
cudaMalloc (&currLevelNodes_cuda, numCurrLevelNodes * sizeof(int));
cudaMemcpy(currLevelNodes_cuda, currLevelNodes_h, numCurrLevelNodes * sizeof(int), cudaMemcpyHostToDevice);
int* nodeNeighbors_cuda = (int*)malloc( numTotalNeighbors_h * sizeof(int)) ;
cudaMalloc (&nodeNeighbors_cuda, numTotalNeighbors_h * sizeof(int));
cudaMemcpy(nodeNeighbors_cuda, nodeNeighbors_h, numTotalNeighbors_h * sizeof(int), cudaMemcpyHostToDevice);
int* nodeVisited_cuda = (int*)malloc( numNodesSize) ;
cudaMalloc (&nodeVisited_cuda, numNodesSize);
cudaMemcpy(nodeVisited_cuda, nodeVisited_h,numNodesSize, cudaMemcpyHostToDevice);
int* nodeGate_cuda = (int*)malloc( numNodesSize) ;
cudaMalloc (&nodeGate_cuda, numNodesSize);
cudaMemcpy(nodeGate_cuda, nodeGate_h, numNodesSize, cudaMemcpyHostToDevice);
int* nodeInput_cuda = (int*)malloc( numNodesSize) ;
cudaMalloc (&nodeInput_cuda, numNodesSize);
cudaMemcpy(nodeInput_cuda, nodeInput_h, numNodesSize, cudaMemcpyHostToDevice);
int* nodeOutput_cuda = (int*)malloc(numNodesSize) ;
cudaMalloc (&nodeOutput_cuda, numNodesSize);
cudaMemcpy(nodeOutput_cuda, nodeOutput_h, numNodesSize, cudaMemcpyHostToDevice);
clock_t start = clock();
// kernel call
global_queuing_kernel <<< numBlocks, blockSize >>> (blockSize * numBlocks, numNodes, nodePtrs_cuda, currLevelNodes_cuda, nodeNeighbors_cuda, nodeVisited_cuda, nodeGate_cuda, nodeInput_cuda, nodeOutput_cuda);
clock_t end = clock();
checkCudaErr(cudaDeviceSynchronize(), "Synchronization");
checkCudaErr(cudaGetLastError(), "GPU");
cudaMemcpyFromSymbol(&numNextLevelNodes_h, numNextLevelNodes, sizeof(int), 0, cudaMemcpyDeviceToHost);
checkCudaErr(cudaMemcpyFromSymbol(nextLevelNodes_h,globalQueue, numNodesSize), "Copying");
int *outputBuffer;
outputBuffer = (int*)malloc( numNodesSize);
checkCudaErr(cudaMemcpy(outputBuffer, nodeOutput_cuda, numNodesSize, cudaMemcpyDeviceToHost), "Copying");
// write node output file
FILE *nodeOutputFile = fopen(nodeOutput_fileName, "w");
int counter = 0;
fprintf(nodeOutputFile,"%d\n",numNodes);
while (counter < numNodes) {
fprintf(nodeOutputFile,"%d\n",(outputBuffer[counter]));
counter++;
}
fclose(nodeOutputFile);
// write next level output file
FILE *nextLevelOutputFile = fopen(nextLevelNodes_fileName, "w");
counter = 0;
fprintf(nextLevelOutputFile,"%d\n",numNextLevelNodes_h);
while (counter < numNextLevelNodes_h) {
fprintf(nextLevelOutputFile,"%d\n",(nextLevelNodes_h[counter]));
counter++;
}
fclose(nextLevelOutputFile);
double runtime = (double) (end - start) / CLOCKS_PER_SEC * 1000;
printf("Execution time: %f ms\n", runtime);
// free variables
free(nodePtrs_h);
free(nodeNeighbors_h);
free(nodeVisited_h);
free(currLevelNodes_h);
free(nodeGate_h);
free(nodeInput_h);
free(nodeOutput_h);
// free cuda variables
cudaFree(currLevelNodes_cuda);
cudaFree(nodeNeighbors_cuda);
cudaFree(nodePtrs_cuda);
cudaFree(nodeVisited_cuda);
cudaFree(nodeInput_cuda);
cudaFree(nodeOutput_cuda);
cudaFree(nodeGate_cuda);
return 0;
} | code for sm_80
Function : _Z21global_queuing_kerneliiPiS_S_S_S_S_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ IABS R5, c[0x0][0x160] ; /* 0x0000580000057a13 */
/* 0x000fe20000000000 */
/*0020*/ ULDC.64 UR4, c[0x0][0x160] ; /* 0x0000580000047ab9 */
/* 0x000fe40000000a00 */
/*0030*/ ULOP3.LUT UR4, UR5, UR4, URZ, 0x3c, !UPT ; /* 0x0000000405047292 */
/* 0x000fe2000f8e3c3f */
/*0040*/ I2F.RP R0, R5 ; /* 0x0000000500007306 */
/* 0x000e2a0000209400 */
/*0050*/ ISETP.LE.AND P1, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fc6000bf23270 */
/*0060*/ MUFU.RCP R0, R0 ; /* 0x0000000000007308 */
/* 0x001e240000001000 */
/*0070*/ IADD3 R2, R0, 0xffffffe, RZ ; /* 0x0ffffffe00027810 */
/* 0x001fcc0007ffe0ff */
/*0080*/ F2I.FTZ.U32.TRUNC.NTZ R3, R2 ; /* 0x0000000200037305 */
/* 0x000064000021f000 */
/*0090*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */
/* 0x001fe400078e00ff */
/*00a0*/ IMAD.MOV R4, RZ, RZ, -R3 ; /* 0x000000ffff047224 */
/* 0x002fc800078e0a03 */
/*00b0*/ IMAD R7, R4, R5, RZ ; /* 0x0000000504077224 */
/* 0x000fe200078e02ff */
/*00c0*/ IABS R4, c[0x0][0x164] ; /* 0x0000590000047a13 */
/* 0x000fc60000000000 */
/*00d0*/ IMAD.HI.U32 R3, R3, R7, R2 ; /* 0x0000000703037227 */
/* 0x000fe400078e0002 */
/*00e0*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e280000002500 */
/*00f0*/ IMAD.HI.U32 R3, R3, R4, RZ ; /* 0x0000000403037227 */
/* 0x000fc800078e00ff */
/*0100*/ IMAD.MOV R0, RZ, RZ, -R3 ; /* 0x000000ffff007224 */
/* 0x000fc800078e0a03 */
/*0110*/ IMAD R0, R5, R0, R4 ; /* 0x0000000005007224 */
/* 0x000fca00078e0204 */
/*0120*/ ISETP.GT.U32.AND P2, PT, R5, R0, PT ; /* 0x000000000500720c */
/* 0x000fda0003f44070 */
/*0130*/ @!P2 IMAD.IADD R0, R0, 0x1, -R5 ; /* 0x000000010000a824 */
/* 0x000fe200078e0a05 */
/*0140*/ @!P2 IADD3 R3, R3, 0x1, RZ ; /* 0x000000010303a810 */
/* 0x000fe40007ffe0ff */
/*0150*/ ISETP.NE.AND P2, PT, RZ, c[0x0][0x160], PT ; /* 0x00005800ff007a0c */
/* 0x000fe40003f45270 */
/*0160*/ ISETP.GE.U32.AND P0, PT, R0, R5, PT ; /* 0x000000050000720c */
/* 0x000fe40003f06070 */
/*0170*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e360000002100 */
/*0180*/ @P0 IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103030810 */
/* 0x000fca0007ffe0ff */
/*0190*/ @!P1 IMAD.MOV R3, RZ, RZ, -R3 ; /* 0x000000ffff039224 */
/* 0x000fe200078e0a03 */
/*01a0*/ @!P2 LOP3.LUT R3, RZ, c[0x0][0x160], RZ, 0x33, !PT ; /* 0x00005800ff03aa12 */
/* 0x000fc800078e33ff */
/*01b0*/ ISETP.GE.AND P0, PT, R3, 0x1, PT ; /* 0x000000010300780c */
/* 0x000fe20003f06270 */
/*01c0*/ IMAD R0, R2, c[0x0][0x0], R5 ; /* 0x0000000002007a24 */
/* 0x001fc800078e0205 */
/*01d0*/ IMAD R0, R0, R3, RZ ; /* 0x0000000300007224 */
/* 0x000fca00078e02ff */
/*01e0*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x164], !P0 ; /* 0x0000590000007a0c */
/* 0x000fda0004706670 */
/*01f0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0200*/ IMAD.IADD R6, R3, 0x1, R0 ; /* 0x0000000103067824 */
/* 0x000fe200078e0200 */
/*0210*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0220*/ IMAD.MOV.U32 R9, RZ, RZ, 0x4 ; /* 0x00000004ff097424 */
/* 0x000fc800078e00ff */
/*0230*/ IMAD.WIDE R4, R0, R9, c[0x0][0x170] ; /* 0x00005c0000047625 */
/* 0x000fca00078e0209 */
/*0240*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */
/* 0x000ea4000c1e1900 */
/*0250*/ IMAD.WIDE R2, R10, R9, c[0x0][0x168] ; /* 0x00005a000a027625 */
/* 0x004fca00078e0209 */
/*0260*/ LDG.E R7, [R2.64] ; /* 0x0000000402077981 */
/* 0x000ea8000c1e1900 */
/*0270*/ LDG.E R8, [R2.64+0x4] ; /* 0x0000040402087981 */
/* 0x000ea4000c1e1900 */
/*0280*/ ISETP.GE.AND P0, PT, R7, R8, PT ; /* 0x000000080700720c */
/* 0x004fda0003f06270 */
/*0290*/ @P0 BRA 0x7b0 ; /* 0x0000051000000947 */
/* 0x000fea0003800000 */
/*02a0*/ SHF.R.S32.HI R5, RZ, 0x1f, R10 ; /* 0x0000001fff057819 */
/* 0x000fe2000001140a */
/*02b0*/ IMAD.WIDE R8, R7, R9, c[0x0][0x178] ; /* 0x00005e0007087625 */
/* 0x000fe200078e0209 */
/*02c0*/ LEA R4, P0, R10, c[0x0][0x198], 0x2 ; /* 0x000066000a047a11 */
/* 0x000fc800078010ff */
/*02d0*/ LEA.HI.X R5, R10, c[0x0][0x19c], R5, 0x2, P0 ; /* 0x000067000a057a11 */
/* 0x000fe400000f1405 */
/*02e0*/ LDG.E R10, [R8.64] ; /* 0x00000004080a7981 */
/* 0x000ea2000c1e1900 */
/*02f0*/ IMAD.MOV.U32 R11, RZ, RZ, 0x4 ; /* 0x00000004ff0b7424 */
/* 0x000fe200078e00ff */
/*0300*/ YIELD ; /* 0x0000000000007946 */
/* 0x000fe20003800000 */
/*0310*/ IMAD.MOV.U32 R15, RZ, RZ, 0x1 ; /* 0x00000001ff0f7424 */
/* 0x000fe400078e00ff */
/*0320*/ IMAD.WIDE R12, R10, R11, c[0x0][0x180] ; /* 0x000060000a0c7625 */
/* 0x004fcc00078e020b */
/*0330*/ ATOMG.E.EXCH.STRONG.GPU PT, R12, [R12.64], R15 ; /* 0x0000000f0c0c79a8 */
/* 0x000ea2000c1ee1c4 */
/*0340*/ BSSY B0, 0x750 ; /* 0x0000040000007945 */
/* 0x000fe20003800000 */
/*0350*/ ISETP.NE.AND P0, PT, R12, RZ, PT ; /* 0x000000ff0c00720c */
/* 0x004fda0003f05270 */
/*0360*/ @P0 BRA 0x740 ; /* 0x000003d000000947 */
/* 0x000fea0003800000 */
/*0370*/ IMAD.SHL.U32 R12, R10.reuse, 0x4, RZ ; /* 0x000000040a0c7824 */
/* 0x040fe200078e00ff */
/*0380*/ SHF.R.S32.HI R13, RZ, 0x1f, R10 ; /* 0x0000001fff0d7819 */
/* 0x000fe2000001140a */
/*0390*/ LDG.E R19, [R4.64] ; /* 0x0000000404137981 */
/* 0x000166000c1e1900 */
/*03a0*/ SHF.L.U64.HI R13, R10, 0x2, R13 ; /* 0x000000020a0d7819 */
/* 0x000fe4000001020d */
/*03b0*/ IADD3 R16, P0, R12, c[0x0][0x188], RZ ; /* 0x000062000c107a10 */
/* 0x000fc80007f1e0ff */
/*03c0*/ IADD3.X R17, R13, c[0x0][0x18c], RZ, P0, !PT ; /* 0x000063000d117a10 */
/* 0x000fca00007fe4ff */
/*03d0*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000ea2000c1e1900 */
/*03e0*/ IADD3 R14, P0, R12, c[0x0][0x190], RZ ; /* 0x000064000c0e7a10 */
/* 0x000fc80007f1e0ff */
/*03f0*/ IADD3.X R15, R13, c[0x0][0x194], RZ, P0, !PT ; /* 0x000065000d0f7a10 */
/* 0x000fca00007fe4ff */
/*0400*/ LDG.E R18, [R14.64] ; /* 0x000000040e127981 */
/* 0x000162000c1e1900 */
/*0410*/ BSSY B1, 0x610 ; /* 0x000001f000017945 */
/* 0x000fe20003800000 */
/*0420*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0430*/ ISETP.GT.AND P1, PT, R16, 0x2, PT ; /* 0x000000021000780c */
/* 0x004fda0003f24270 */
/*0440*/ @P1 BRA 0x520 ; /* 0x000000d000001947 */
/* 0x000fea0003800000 */
/*0450*/ IMNMX.U32 R14, R16, 0x3, PT ; /* 0x00000003100e7817 */
/* 0x001fca0003800000 */
/*0460*/ IMAD.SHL.U32 R16, R14, 0x4, RZ ; /* 0x000000040e107824 */
/* 0x000fc800078e00ff */
/*0470*/ LDC R14, c[0x2][R16] ; /* 0x00800000100e7b82 */
/* 0x000e240000000800 */
/*0480*/ SHF.R.S32.HI R15, RZ, 0x1f, R14 ; /* 0x0000001fff0f7819 */
/* 0x001fc8000001140e */
/*0490*/ BRX R14 -0x4a0 ; /* 0xfffffb600e007949 */
/* 0x000fea000383ffff */
/*04a0*/ ISETP.NE.AND P0, PT, R19, 0x1, PT ; /* 0x000000011300780c */
/* 0x020fc80003f05270 */
/*04b0*/ ISETP.NE.OR P0, PT, R18, 0x1, P0 ; /* 0x000000011200780c */
/* 0x000fe20000705670 */
/*04c0*/ BRA 0x600 ; /* 0x0000013000007947 */
/* 0x000fee0003800000 */
/*04d0*/ ISETP.NE.AND P0, PT, R19, 0x1, PT ; /* 0x000000011300780c */
/* 0x020fc80003f05270 */
/*04e0*/ ISETP.EQ.AND P0, PT, R18, 0x1, !P0 ; /* 0x000000011200780c */
/* 0x000fe20004702270 */
/*04f0*/ BRA 0x600 ; /* 0x0000010000007947 */
/* 0x000fee0003800000 */
/*0500*/ LOP3.LUT P0, RZ, R19, R18, RZ, 0xfc, !PT ; /* 0x0000001213ff7212 */
/* 0x020fe2000780fcff */
/*0510*/ BRA 0x600 ; /* 0x000000e000007947 */
/* 0x000fee0003800000 */
/*0520*/ IADD3 R14, R16, -0x3, RZ ; /* 0xfffffffd100e7810 */
/* 0x001fc80007ffe0ff */
/*0530*/ IMNMX.U32 R14, R14, 0x2, PT ; /* 0x000000020e0e7817 */
/* 0x000fca0003800000 */
/*0540*/ IMAD.SHL.U32 R17, R14, 0x4, RZ ; /* 0x000000040e117824 */
/* 0x000fc800078e00ff */
/*0550*/ LDC R14, c[0x2][R17+0x10] ; /* 0x00800400110e7b82 */
/* 0x000e240000000800 */
/*0560*/ SHF.R.S32.HI R15, RZ, 0x1f, R14 ; /* 0x0000001fff0f7819 */
/* 0x001fc8000001140e */
/*0570*/ BRX R14 -0x580 ; /* 0xfffffa800e007949 */
/* 0x000fea000383ffff */
/*0580*/ ISETP.NE.AND P1, PT, R16, 0x5, PT ; /* 0x000000051000780c */
/* 0x000fda0003f25270 */
/*0590*/ @P1 BRA 0x600 ; /* 0x0000006000001947 */
/* 0x000fea0003800000 */
/*05a0*/ ISETP.EQ.AND P0, PT, R18, R19, PT ; /* 0x000000131200720c */
/* 0x020fe20003f02270 */
/*05b0*/ BRA 0x600 ; /* 0x0000004000007947 */
/* 0x000fee0003800000 */
/*05c0*/ LOP3.LUT P0, RZ, R19, R18, RZ, 0xfc, !PT ; /* 0x0000001213ff7212 */
/* 0x020fc8000780fcff */
/*05d0*/ PLOP3.LUT P0, PT, P0, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe2000070e170 */
/*05e0*/ BRA 0x600 ; /* 0x0000001000007947 */
/* 0x000fee0003800000 */
/*05f0*/ ISETP.NE.AND P0, PT, R18, R19, PT ; /* 0x000000131200720c */
/* 0x020fd00003f05270 */
/*0600*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0610*/ S2R R15, SR_LANEID ; /* 0x00000000000f7919 */
/* 0x000e220000000000 */
/*0620*/ VOTEU.ANY UR6, UPT, PT ; /* 0x0000000000067886 */
/* 0x000fe200038e0100 */
/*0630*/ IADD3 R12, P2, R12, c[0x0][0x198], RZ ; /* 0x000066000c0c7a10 */
/* 0x000fe20007f5e0ff */
/*0640*/ FLO.U32 R18, UR6 ; /* 0x0000000600127d00 */
/* 0x020e2200080e0000 */
/*0650*/ SEL R19, RZ, 0x1, !P0 ; /* 0x00000001ff137807 */
/* 0x000fe20004000000 */
/*0660*/ IMAD.MOV.U32 R14, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff0e7624 */
/* 0x000fe200078e00ff */
/*0670*/ IADD3.X R13, R13, c[0x0][0x19c], RZ, P2, !PT ; /* 0x000067000d0d7a10 */
/* 0x000fca00017fe4ff */
/*0680*/ POPC R21, UR6 ; /* 0x0000000600157d09 */
/* 0x000e620008000000 */
/*0690*/ STG.E [R12.64], R19 ; /* 0x000000130c007986 */
/* 0x000fe2000c101904 */
/*06a0*/ ISETP.EQ.U32.AND P1, PT, R18, R15, PT ; /* 0x0000000f1200720c */
/* 0x001fe20003f22070 */
/*06b0*/ IMAD.MOV.U32 R15, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff0f7624 */
/* 0x000fd800078e00ff */
/*06c0*/ @P1 ATOMG.E.ADD.STRONG.GPU PT, R15, [R14.64], R21 ; /* 0x000000150e0f19a8 */
/* 0x002ea800081ee1c4 */
/*06d0*/ S2R R17, SR_LTMASK ; /* 0x0000000000117919 */
/* 0x000e240000003900 */
/*06e0*/ LOP3.LUT R20, R17, UR6, RZ, 0xc0, !PT ; /* 0x0000000611147c12 */
/* 0x001fc8000f8ec0ff */
/*06f0*/ POPC R17, R20 ; /* 0x0000001400117309 */
/* 0x000e220000000000 */
/*0700*/ SHFL.IDX PT, R16, R15, R18, 0x1f ; /* 0x00001f120f107589 */
/* 0x004e2400000e0000 */
/*0710*/ IMAD.IADD R16, R16, 0x1, R17 ; /* 0x0000000110107824 */
/* 0x001fc800078e0211 */
/*0720*/ IMAD.WIDE R16, R16, R11, c[0x4][0x0] ; /* 0x0100000010107625 */
/* 0x000fca00078e020b */
/*0730*/ STG.E [R16.64], R10 ; /* 0x0000000a10007986 */
/* 0x0001e4000c101904 */
/*0740*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0750*/ LDG.E R10, [R2.64+0x4] ; /* 0x00000404020a7981 */
/* 0x001ea2000c1e1900 */
/*0760*/ IADD3 R7, R7, 0x1, RZ ; /* 0x0000000107077810 */
/* 0x000fe40007ffe0ff */
/*0770*/ IADD3 R8, P1, R8, 0x4, RZ ; /* 0x0000000408087810 */
/* 0x000fca0007f3e0ff */
/*0780*/ IMAD.X R9, RZ, RZ, R9, P1 ; /* 0x000000ffff097224 */
/* 0x000fe200008e0609 */
/*0790*/ ISETP.GE.AND P0, PT, R7, R10, PT ; /* 0x0000000a0700720c */
/* 0x004fda0003f06270 */
/*07a0*/ @!P0 BRA 0x2e0 ; /* 0xfffffb3000008947 */
/* 0x000fea000383ffff */
/*07b0*/ IADD3 R0, R0, 0x1, RZ ; /* 0x0000000100007810 */
/* 0x000fe20007ffe0ff */
/*07c0*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe20003800000 */
/*07d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe40000010000 */
/*07e0*/ ISETP.GE.AND P0, PT, R0.reuse, c[0x0][0x164], PT ; /* 0x0000590000007a0c */
/* 0x040fe40003f06270 */
/*07f0*/ ISETP.LT.AND P1, PT, R0, R6, PT ; /* 0x000000060000720c */
/* 0x000fda0003f21270 */
/*0800*/ @!P0 BRA P1, 0x220 ; /* 0xfffffa1000008947 */
/* 0x000fea000083ffff */
/*0810*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0820*/ BRA 0x820; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0830*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0840*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0850*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0860*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0870*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0880*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0890*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
// method to read the first, second and fourth input files
// Reads input files 1, 2 and 4: a count N on the first line followed by N
// integers.  On success, *input points at a malloc'd array of N ints (owned
// by the caller) and N is returned.  If the file cannot be opened (or the
// array cannot be allocated), a diagnostic is printed and the fprintf byte
// count is returned -- the original error convention, preserved for callers.
int read_input_one_two_four(int** input, char* filepath) {
    FILE* fp = fopen(filepath, "r");
    if (!fp) return fprintf(stderr, "Couldn't open file for reading\n");
    // Initialized so a malformed header cannot leave file_length indeterminate
    // (the original read it unchecked and then used the garbage as a size).
    int file_length = 0;
    fscanf(fp, "%d", &file_length);
    *input = (int*) malloc(file_length * sizeof(int));
    if (*input == NULL && file_length > 0) {
        // Allocation failure: fail the same way an unopenable file does.
        fclose(fp);
        return fprintf(stderr, "Couldn't open file for reading\n");
    }
    int next_int = 0;
    for (int i = 0; i < file_length; i++) {
        // On a short or garbled file, store 0 instead of an indeterminate int.
        if (fscanf(fp, "%d", &next_int) != 1) {
            next_int = 0;
        }
        (*input)[i] = next_int;
    }
    fclose(fp);
    return file_length;
}
// method to read the third input file
// Reads input file 3: a count N followed by N comma-separated quadruples
// "a, b, c, d" (visited, gate, input, output columns).  On success the four
// out-parameters each point at a caller-owned malloc'd array of N ints and N
// is returned.  If the file cannot be opened (or an array cannot be
// allocated), a diagnostic is printed and the fprintf byte count is returned
// -- the original error convention, preserved for callers.
int read_input_three(int** input1, int** input2, int** input3, int** input4, char* filepath) {
    FILE* fp = fopen(filepath, "r");
    if (!fp) return fprintf(stderr, "Couldn't open file for reading\n");
    // Initialized so a malformed header cannot leave file_length indeterminate.
    int file_length = 0;
    fscanf(fp, "%d", &file_length);
    *input1 = (int*) malloc(file_length * sizeof(int));
    *input2 = (int*) malloc(file_length * sizeof(int));
    *input3 = (int*) malloc(file_length * sizeof(int));
    *input4 = (int*) malloc(file_length * sizeof(int));
    if (file_length > 0 &&
        (*input1 == NULL || *input2 == NULL || *input3 == NULL || *input4 == NULL)) {
        // Allocation failure: release whatever succeeded and fail the same
        // way an unopenable file does.
        free(*input1); free(*input2); free(*input3); free(*input4);
        fclose(fp);
        return fprintf(stderr, "Couldn't open file for reading\n");
    }
    int next_int1 = 0;
    int next_int2 = 0;
    int next_int3 = 0;
    int next_int4 = 0;
    for (int i = 0; i < file_length; i++) {
        // On a short or garbled line, store zeros rather than indeterminate ints.
        if (fscanf(fp, "%d, %d, %d, %d", &next_int1, &next_int2, &next_int3, &next_int4) != 4) {
            next_int1 = next_int2 = next_int3 = next_int4 = 0;
        }
        (*input1)[i] = next_int1;
        (*input2)[i] = next_int2;
        (*input3)[i] = next_int3;
        (*input4)[i] = next_int4;
    }
    fclose(fp);
    return file_length;
}
__device__ int globalQueue[7000000];
__device__ int numNextLevelNodes = 0;
__global__ void global_queuing_kernel(int totalThreads, int numNodes, int* nodePtrs, int* currLevelNodes, int* nodeNeighbors, int* nodeVisited, int* nodeGate, int* nodeInput, int* nodeOutput) {
    // Expands one BFS level of a gate graph: each thread claims a contiguous
    // slice of currLevelNodes, visits every not-yet-visited neighbor,
    // evaluates the neighbor's gate, and appends the neighbor to the
    // device-global queue (globalQueue / numNextLevelNodes).
    //
    // Ceil-divide so the tail of the node list is covered.  The original
    // floor division dropped the last (numNodes % totalThreads) nodes and
    // assigned every thread 0 nodes whenever numNodes < totalThreads.
    int nodesPerThread = (numNodes + totalThreads - 1) / totalThreads;
    int threadIndex = threadIdx.x + (blockDim.x * blockIdx.x);
    int beginIndex = threadIndex * nodesPerThread;
    int endIndex = beginIndex + nodesPerThread;
    if (endIndex > numNodes) {
        endIndex = numNodes;
    }
    // Loop over this thread's slice of the current level
    for (int index = beginIndex; index < endIndex; index++) {
        int nodeIndex = currLevelNodes[index];
        // Loop over all neighbors of the node (CSR-style adjacency)
        for (int secondIndex = nodePtrs[nodeIndex]; secondIndex < nodePtrs[nodeIndex + 1]; secondIndex++) {
            int neighborIndex = nodeNeighbors[secondIndex];
            // atomicExch both marks the neighbor visited and tells this
            // thread whether it won the race to process it.
            const int alreadyVisited = atomicExch(&(nodeVisited[neighborIndex]), 1);
            if (!alreadyVisited) {
                int nInputV = nodeInput[neighborIndex];
                // NOTE(review): the input is read via neighborIndex but the
                // output via nodeIndex (the current node).  This asymmetry is
                // preserved from the original -- confirm it matches the
                // intended gate wiring.
                int nOutputV = nodeOutput[nodeIndex];
                int nGateV = nodeGate[neighborIndex];
                int result = 0; // unknown gate codes fall through to 0
                switch (nGateV) {
                    case 0: result = (nInputV == 1 && nOutputV == 1) ? 1 : 0; break; // 1 iff both are 1
                    case 1: result = (nInputV == 0 && nOutputV == 0) ? 0 : 1; break; // 0 iff both are 0
                    case 2: result = (nInputV == 1 && nOutputV == 1) ? 0 : 1; break; // 0 iff both are 1
                    case 3: result = (nInputV == 0 && nOutputV == 0) ? 1 : 0; break; // 1 iff both are 0
                    case 4: result = (nInputV != nOutputV) ? 1 : 0; break;           // 1 iff they differ
                    case 5: result = (nInputV == nOutputV) ? 1 : 0; break;           // 1 iff they match
                }
                // Update node output
                nodeOutput[neighborIndex] = result;
                // Reserve a unique slot in the global queue.
                int globalQueueIndex = atomicAdd(&numNextLevelNodes, 1);
                globalQueue[globalQueueIndex] = neighborIndex;
            }
        }
    }
    // The original called __syncthreads() inside the outer loop; threads in a
    // block execute different trip counts, so that barrier was divergent
    // (undefined behavior, potential hang).  No shared memory is used, so the
    // kernel needs no barrier at all.
}
// Pass-through CUDA status checker: reports the failure (if any) on stderr
// and hands the same status back to the caller for further inspection.
inline cudaError_t checkCudaErr(cudaError_t err, const char* msg) {
    const bool failed = (err != cudaSuccess);
    if (failed) {
        const char* detail = cudaGetErrorString(err);
        fprintf(stderr, "Error at runtime %s: %s\n", msg, detail);
    }
    return err;
}
// Driver: loads the graph from the four input files (argv[1..4]), runs one
// level of global-queue BFS on the GPU, and writes the node outputs and the
// next-level queue to argv[5] and argv[6].
int main(int argc, char *argv[]){
    if (argc < 7) {
        return fprintf(stderr, "Missing input argument(s)!\n");
    }
    // Host-side graph data
    int numNodePtrs;
    int numNodes;
    int *nodePtrs_h;
    int *nodeNeighbors_h;
    int *nodeVisited_h;
    int numTotalNeighbors_h;
    int *currLevelNodes_h;
    int numCurrLevelNodes;
    int numNextLevelNodes_h = 0;
    int *nodeGate_h;
    int *nodeInput_h;
    int *nodeOutput_h;
    numNodePtrs = read_input_one_two_four(&nodePtrs_h, argv[1]);
    numTotalNeighbors_h = read_input_one_two_four(&nodeNeighbors_h, argv[2]);
    numNodes = read_input_three(&nodeVisited_h, &nodeGate_h, &nodeInput_h, &nodeOutput_h,argv[3]);
    numCurrLevelNodes = read_input_one_two_four(&currLevelNodes_h, argv[4]);
    char* nodeOutput_fileName = argv[5];
    char* nextLevelNodes_fileName = argv[6];
    // size_t: int arithmetic here could overflow for large graphs
    size_t numNodesSize = (size_t)numNodes * sizeof(int);
    // calloc (not malloc): this buffer is copied to the device symbol below,
    // and the original copied uninitialized bytes.
    int *nextLevelNodes_h = (int *)calloc(numNodes, sizeof(int));
    checkCudaErr(cudaMemcpyToSymbol(globalQueue, nextLevelNodes_h, numNodesSize), "Copying");
    int numBlocks = 35;
    int blockSize = 128;
    // Device buffers.  The original also malloc'd a host copy of each of
    // these and immediately overwrote the pointer with cudaMalloc, leaking
    // seven allocations; the host copies were never used.
    int *nodePtrs_cuda, *currLevelNodes_cuda, *nodeNeighbors_cuda;
    int *nodeVisited_cuda, *nodeGate_cuda, *nodeInput_cuda, *nodeOutput_cuda;
    checkCudaErr(cudaMalloc(&nodePtrs_cuda, numNodePtrs * sizeof(int)), "Malloc");
    checkCudaErr(cudaMemcpy(nodePtrs_cuda, nodePtrs_h, numNodePtrs * sizeof(int), cudaMemcpyHostToDevice), "Copying");
    checkCudaErr(cudaMalloc(&currLevelNodes_cuda, numCurrLevelNodes * sizeof(int)), "Malloc");
    checkCudaErr(cudaMemcpy(currLevelNodes_cuda, currLevelNodes_h, numCurrLevelNodes * sizeof(int), cudaMemcpyHostToDevice), "Copying");
    checkCudaErr(cudaMalloc(&nodeNeighbors_cuda, numTotalNeighbors_h * sizeof(int)), "Malloc");
    checkCudaErr(cudaMemcpy(nodeNeighbors_cuda, nodeNeighbors_h, numTotalNeighbors_h * sizeof(int), cudaMemcpyHostToDevice), "Copying");
    checkCudaErr(cudaMalloc(&nodeVisited_cuda, numNodesSize), "Malloc");
    checkCudaErr(cudaMemcpy(nodeVisited_cuda, nodeVisited_h, numNodesSize, cudaMemcpyHostToDevice), "Copying");
    checkCudaErr(cudaMalloc(&nodeGate_cuda, numNodesSize), "Malloc");
    checkCudaErr(cudaMemcpy(nodeGate_cuda, nodeGate_h, numNodesSize, cudaMemcpyHostToDevice), "Copying");
    checkCudaErr(cudaMalloc(&nodeInput_cuda, numNodesSize), "Malloc");
    checkCudaErr(cudaMemcpy(nodeInput_cuda, nodeInput_h, numNodesSize, cudaMemcpyHostToDevice), "Copying");
    checkCudaErr(cudaMalloc(&nodeOutput_cuda, numNodesSize), "Malloc");
    checkCudaErr(cudaMemcpy(nodeOutput_cuda, nodeOutput_h, numNodesSize, cudaMemcpyHostToDevice), "Copying");
    clock_t start = clock();
    // kernel call
    global_queuing_kernel <<< numBlocks, blockSize >>> (blockSize * numBlocks, numNodes, nodePtrs_cuda, currLevelNodes_cuda, nodeNeighbors_cuda, nodeVisited_cuda, nodeGate_cuda, nodeInput_cuda, nodeOutput_cuda);
    // Kernel launches are asynchronous: stop the clock only after the device
    // has finished (the original measured just the launch overhead).
    checkCudaErr(cudaDeviceSynchronize(), "Synchronization");
    clock_t end = clock();
    checkCudaErr(cudaGetLastError(), "GPU");
    checkCudaErr(cudaMemcpyFromSymbol(&numNextLevelNodes_h, numNextLevelNodes, sizeof(int), 0, cudaMemcpyDeviceToHost), "Copying");
    checkCudaErr(cudaMemcpyFromSymbol(nextLevelNodes_h, globalQueue, numNodesSize), "Copying");
    int *outputBuffer = (int*)malloc(numNodesSize);
    checkCudaErr(cudaMemcpy(outputBuffer, nodeOutput_cuda, numNodesSize, cudaMemcpyDeviceToHost), "Copying");
    // write node output file
    FILE *nodeOutputFile = fopen(nodeOutput_fileName, "w");
    if (!nodeOutputFile) {
        return fprintf(stderr, "Couldn't open file for writing\n");
    }
    fprintf(nodeOutputFile,"%d\n",numNodes);
    for (int counter = 0; counter < numNodes; counter++) {
        fprintf(nodeOutputFile,"%d\n",(outputBuffer[counter]));
    }
    fclose(nodeOutputFile);
    // write next level output file
    FILE *nextLevelOutputFile = fopen(nextLevelNodes_fileName, "w");
    if (!nextLevelOutputFile) {
        return fprintf(stderr, "Couldn't open file for writing\n");
    }
    fprintf(nextLevelOutputFile,"%d\n",numNextLevelNodes_h);
    for (int counter = 0; counter < numNextLevelNodes_h; counter++) {
        fprintf(nextLevelOutputFile,"%d\n",(nextLevelNodes_h[counter]));
    }
    fclose(nextLevelOutputFile);
    double runtime = (double) (end - start) / CLOCKS_PER_SEC * 1000;
    printf("Execution time: %f ms\n", runtime);
    // free host variables (the original leaked nextLevelNodes_h and outputBuffer)
    free(nodePtrs_h);
    free(nodeNeighbors_h);
    free(nodeVisited_h);
    free(currLevelNodes_h);
    free(nodeGate_h);
    free(nodeInput_h);
    free(nodeOutput_h);
    free(nextLevelNodes_h);
    free(outputBuffer);
    // free cuda variables
    cudaFree(currLevelNodes_cuda);
    cudaFree(nodeNeighbors_cuda);
    cudaFree(nodePtrs_cuda);
    cudaFree(nodeVisited_cuda);
    cudaFree(nodeInput_cuda);
    cudaFree(nodeOutput_cuda);
    cudaFree(nodeGate_cuda);
    return 0;
}
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2063:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "r"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "Couldn't open file for reading\n"
.section .rodata.str1.1
.LC2:
.string "%d"
.text
.globl _Z23read_input_one_two_fourPPiPc
.type _Z23read_input_one_two_fourPPiPc, @function
_Z23read_input_one_two_fourPPiPc:
.LFB2057:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $24, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %r12
movq %rsi, %rdi
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
leaq .LC0(%rip), %rsi
call fopen@PLT
testq %rax, %rax
je .L11
movq %rax, %rbp
movq %rsp, %rdx
leaq .LC2(%rip), %rsi
movq %rax, %rdi
movl $0, %eax
call __isoc23_fscanf@PLT
movl (%rsp), %ebx
movslq %ebx, %rdi
salq $2, %rdi
call malloc@PLT
movq %rax, (%r12)
testl %ebx, %ebx
jle .L6
movl $0, %ebx
leaq .LC2(%rip), %r13
.L7:
leaq 4(%rsp), %rdx
movq %r13, %rsi
movq %rbp, %rdi
movl $0, %eax
call __isoc23_fscanf@PLT
movq (%r12), %rax
movl 4(%rsp), %edx
movl %edx, (%rax,%rbx,4)
addq $1, %rbx
cmpl %ebx, (%rsp)
jg .L7
.L6:
movq %rbp, %rdi
call fclose@PLT
movl (%rsp), %eax
.L3:
movq 8(%rsp), %rdx
subq %fs:40, %rdx
jne .L12
addq $24, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L3
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z23read_input_one_two_fourPPiPc, .-_Z23read_input_one_two_fourPPiPc
.section .rodata.str1.1
.LC3:
.string "%d, %d, %d, %d"
.text
.globl _Z16read_input_threePPiS0_S0_S0_Pc
.type _Z16read_input_threePPiS0_S0_S0_Pc, @function
_Z16read_input_threePPiS0_S0_S0_Pc:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $56, %rsp
.cfi_def_cfa_offset 112
movq %rdi, %r12
movq %rsi, %r13
movq %rdx, %r14
movq %rcx, %r15
movq %r8, %rdi
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
leaq .LC0(%rip), %rsi
call fopen@PLT
testq %rax, %rax
je .L21
movq %rax, %rbp
leaq 20(%rsp), %rdx
leaq .LC2(%rip), %rsi
movq %rax, %rdi
movl $0, %eax
call __isoc23_fscanf@PLT
movl 20(%rsp), %eax
movl %eax, 12(%rsp)
movslq %eax, %rbx
salq $2, %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, (%r12)
movq %rbx, %rdi
call malloc@PLT
movq %rax, 0(%r13)
movq %rbx, %rdi
call malloc@PLT
movq %rax, (%r14)
movq %rbx, %rdi
call malloc@PLT
movq %rax, (%r15)
cmpl $0, 12(%rsp)
jle .L16
movl $0, %ebx
.L17:
leaq 28(%rsp), %rcx
leaq 24(%rsp), %rdx
leaq 36(%rsp), %r9
leaq 32(%rsp), %r8
leaq .LC3(%rip), %rsi
movq %rbp, %rdi
movl $0, %eax
call __isoc23_fscanf@PLT
movq (%r12), %rax
movl 24(%rsp), %edx
movl %edx, (%rax,%rbx,4)
movq 0(%r13), %rax
movl 28(%rsp), %edx
movl %edx, (%rax,%rbx,4)
movq (%r14), %rax
movl 32(%rsp), %edx
movl %edx, (%rax,%rbx,4)
movq (%r15), %rax
movl 36(%rsp), %edx
movl %edx, (%rax,%rbx,4)
addq $1, %rbx
cmpl %ebx, 20(%rsp)
jg .L17
.L16:
movq %rbp, %rdi
call fclose@PLT
movl 20(%rsp), %eax
.L13:
movq 40(%rsp), %rdx
subq %fs:40, %rdx
jne .L22
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L21:
.cfi_restore_state
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L13
.L22:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size _Z16read_input_threePPiS0_S0_S0_Pc, .-_Z16read_input_threePPiS0_S0_S0_Pc
.section .rodata._Z12checkCudaErr9cudaErrorPKc.str1.1,"aMS",@progbits,1
.LC4:
.string "Error at runtime %s: %s\n"
.section .text._Z12checkCudaErr9cudaErrorPKc,"axG",@progbits,_Z12checkCudaErr9cudaErrorPKc,comdat
.weak _Z12checkCudaErr9cudaErrorPKc
.type _Z12checkCudaErr9cudaErrorPKc, @function
_Z12checkCudaErr9cudaErrorPKc:
.LFB2059:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movl %edi, %ebx
testl %edi, %edi
jne .L26
.L24:
movl %ebx, %eax
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L26:
.cfi_restore_state
movq %rsi, %rbp
call cudaGetErrorString@PLT
movq %rax, %r8
movq %rbp, %rcx
leaq .LC4(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L24
.cfi_endproc
.LFE2059:
.size _Z12checkCudaErr9cudaErrorPKc, .-_Z12checkCudaErr9cudaErrorPKc
.text
.globl _Z55__device_stub__Z21global_queuing_kerneliiPiS_S_S_S_S_S_iiPiS_S_S_S_S_S_
.type _Z55__device_stub__Z21global_queuing_kerneliiPiS_S_S_S_S_S_iiPiS_S_S_S_S_S_, @function
_Z55__device_stub__Z21global_queuing_kerneliiPiS_S_S_S_S_S_iiPiS_S_S_S_S_S_:
.LFB2085:
.cfi_startproc
endbr64
subq $216, %rsp
.cfi_def_cfa_offset 224
movl %edi, 60(%rsp)
movl %esi, 56(%rsp)
movq %rdx, 48(%rsp)
movq %rcx, 40(%rsp)
movq %r8, 32(%rsp)
movq %r9, 24(%rsp)
movq 224(%rsp), %rax
movq %rax, 16(%rsp)
movq 232(%rsp), %rax
movq %rax, 8(%rsp)
movq 240(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 200(%rsp)
xorl %eax, %eax
leaq 60(%rsp), %rax
movq %rax, 128(%rsp)
leaq 56(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rax
movq %rax, 144(%rsp)
leaq 40(%rsp), %rax
movq %rax, 152(%rsp)
leaq 32(%rsp), %rax
movq %rax, 160(%rsp)
leaq 24(%rsp), %rax
movq %rax, 168(%rsp)
leaq 16(%rsp), %rax
movq %rax, 176(%rsp)
leaq 8(%rsp), %rax
movq %rax, 184(%rsp)
movq %rsp, %rax
movq %rax, 192(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 200(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $216, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 232
pushq 72(%rsp)
.cfi_def_cfa_offset 240
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z21global_queuing_kerneliiPiS_S_S_S_S_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 224
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z55__device_stub__Z21global_queuing_kerneliiPiS_S_S_S_S_S_iiPiS_S_S_S_S_S_, .-_Z55__device_stub__Z21global_queuing_kerneliiPiS_S_S_S_S_S_iiPiS_S_S_S_S_S_
.globl _Z21global_queuing_kerneliiPiS_S_S_S_S_S_
.type _Z21global_queuing_kerneliiPiS_S_S_S_S_S_, @function
_Z21global_queuing_kerneliiPiS_S_S_S_S_S_:
.LFB2086:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 40(%rsp)
.cfi_def_cfa_offset 32
pushq 40(%rsp)
.cfi_def_cfa_offset 40
pushq 40(%rsp)
.cfi_def_cfa_offset 48
call _Z55__device_stub__Z21global_queuing_kerneliiPiS_S_S_S_S_S_iiPiS_S_S_S_S_S_
addq $40, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z21global_queuing_kerneliiPiS_S_S_S_S_S_, .-_Z21global_queuing_kerneliiPiS_S_S_S_S_S_
.section .rodata.str1.1
.LC5:
.string "Missing input argument(s)!\n"
.LC6:
.string "Copying"
.LC7:
.string "Synchronization"
.LC8:
.string "GPU"
.LC9:
.string "w"
.LC10:
.string "%d\n"
.LC13:
.string "Execution time: %f ms\n"
.text
.globl main
.type main, @function
main:
.LFB2060:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $248, %rsp
.cfi_def_cfa_offset 304
movq %fs:40, %rax
movq %rax, 232(%rsp)
xorl %eax, %eax
cmpl $6, %edi
jg .L36
leaq .LC5(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
call __fprintf_chk@PLT
.L35:
movq 232(%rsp), %rdx
subq %fs:40, %rdx
jne .L47
addq $248, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L36:
.cfi_restore_state
movq %rsi, %r12
movq 8(%rsi), %rsi
leaq 96(%rsp), %rdi
call _Z23read_input_one_two_fourPPiPc
movl %eax, %r15d
movq 16(%r12), %rsi
leaq 104(%rsp), %rdi
call _Z23read_input_one_two_fourPPiPc
movl %eax, %ebx
leaq 144(%rsp), %rcx
leaq 136(%rsp), %rdx
leaq 128(%rsp), %rsi
leaq 112(%rsp), %rdi
movq 24(%r12), %r8
call _Z16read_input_threePPiS0_S0_S0_Pc
movl %eax, %r13d
movq 32(%r12), %rsi
leaq 120(%rsp), %rdi
call _Z23read_input_one_two_fourPPiPc
movl %eax, %ebp
movq 40(%r12), %rax
movq %rax, 56(%rsp)
movq 48(%r12), %rax
movq %rax, 64(%rsp)
movslq %r13d, %r14
salq $2, %r14
movq %r14, %rdi
call malloc@PLT
movq %rax, %r12
movl $1, %r8d
movl $0, %ecx
movq %r14, %rdx
movq %rax, %rsi
leaq _ZL11globalQueue(%rip), %rdi
call cudaMemcpyToSymbol@PLT
movl %eax, %edi
leaq .LC6(%rip), %rsi
call _Z12checkCudaErr9cudaErrorPKc
movslq %r15d, %r15
salq $2, %r15
movq %r15, %rdi
call malloc@PLT
movq %rax, 152(%rsp)
leaq 152(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
movq 96(%rsp), %rax
movq %rax, 8(%rsp)
movl $1, %ecx
movq %r15, %rdx
movq %rax, %rsi
movq 152(%rsp), %rdi
call cudaMemcpy@PLT
movslq %ebp, %rbp
salq $2, %rbp
movq %rbp, %rdi
call malloc@PLT
movq %rax, 160(%rsp)
leaq 160(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
movq 120(%rsp), %rax
movq %rax, 16(%rsp)
movl $1, %ecx
movq %rbp, %rdx
movq %rax, %rsi
movq 160(%rsp), %rdi
call cudaMemcpy@PLT
movslq %ebx, %rbx
salq $2, %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, 168(%rsp)
leaq 168(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movq 104(%rsp), %rax
movq %rax, 24(%rsp)
movl $1, %ecx
movq %rbx, %rdx
movq %rax, %rsi
movq 168(%rsp), %rdi
call cudaMemcpy@PLT
leal 0(,%r13,4), %ebx
movslq %ebx, %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, 176(%rsp)
leaq 176(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movq 112(%rsp), %rax
movq %rax, 32(%rsp)
movl $1, %ecx
movq %rbx, %rdx
movq %rax, %rsi
movq 176(%rsp), %rdi
call cudaMemcpy@PLT
movq %rbx, %rdi
call malloc@PLT
movq %rax, 184(%rsp)
leaq 184(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movq 128(%rsp), %rax
movq %rax, 40(%rsp)
movl $1, %ecx
movq %rbx, %rdx
movq %rax, %rsi
movq 184(%rsp), %rdi
call cudaMemcpy@PLT
movq %rbx, %rdi
call malloc@PLT
movq %rax, 192(%rsp)
leaq 192(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movq 136(%rsp), %rax
movq %rax, 48(%rsp)
movl $1, %ecx
movq %rbx, %rdx
movq %rax, %rsi
movq 192(%rsp), %rdi
call cudaMemcpy@PLT
movq %rbx, %rdi
call malloc@PLT
movq %rax, 200(%rsp)
leaq 200(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movq 144(%rsp), %r15
movl $1, %ecx
movq %rbx, %rdx
movq %r15, %rsi
movq 200(%rsp), %rdi
call cudaMemcpy@PLT
call clock@PLT
movq %rax, 72(%rsp)
movl $128, 220(%rsp)
movl $1, 224(%rsp)
movl $1, 228(%rsp)
movl $35, 208(%rsp)
movl $1, 212(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 220(%rsp), %rdx
movl $1, %ecx
movq 208(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L48
.L38:
call clock@PLT
movq %rax, 80(%rsp)
call cudaDeviceSynchronize@PLT
movl %eax, %edi
leaq .LC7(%rip), %rsi
call _Z12checkCudaErr9cudaErrorPKc
call cudaGetLastError@PLT
movl %eax, %edi
leaq .LC8(%rip), %rsi
call _Z12checkCudaErr9cudaErrorPKc
leaq 220(%rsp), %rdi
movl $2, %r8d
movl $0, %ecx
movl $4, %edx
leaq _ZL17numNextLevelNodes(%rip), %rsi
call cudaMemcpyFromSymbol@PLT
movl $2, %r8d
movl $0, %ecx
movq %rbx, %rdx
leaq _ZL11globalQueue(%rip), %rsi
movq %r12, %rdi
call cudaMemcpyFromSymbol@PLT
movl %eax, %edi
leaq .LC6(%rip), %rbp
movq %rbp, %rsi
call _Z12checkCudaErr9cudaErrorPKc
movq %rbx, %rdi
call malloc@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 200(%rsp), %rsi
movq %rax, 88(%rsp)
movq %rax, %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movq %rbp, %rsi
call _Z12checkCudaErr9cudaErrorPKc
leaq .LC9(%rip), %rsi
movq 56(%rsp), %rdi
call fopen@PLT
movq %rax, %rbx
movl %r13d, %ecx
leaq .LC10(%rip), %rdx
movl $2, %esi
movq %rax, %rdi
movl $0, %eax
call __fprintf_chk@PLT
testl %r13d, %r13d
jle .L39
movq 88(%rsp), %rbp
movq %rbp, %r13
addq %r14, %rbp
leaq .LC10(%rip), %r14
.L40:
movl 0(%r13), %ecx
movq %r14, %rdx
movl $2, %esi
movq %rbx, %rdi
movl $0, %eax
call __fprintf_chk@PLT
addq $4, %r13
cmpq %rbp, %r13
jne .L40
.L39:
movq %rbx, %rdi
call fclose@PLT
leaq .LC9(%rip), %rsi
movq 64(%rsp), %rdi
call fopen@PLT
movq %rax, %rbp
movl 220(%rsp), %ecx
leaq .LC10(%rip), %rdx
movl $2, %esi
movq %rax, %rdi
movl $0, %eax
call __fprintf_chk@PLT
cmpl $0, 220(%rsp)
jle .L41
movl $0, %ebx
leaq .LC10(%rip), %r13
.L42:
movl (%r12,%rbx,4), %ecx
movq %r13, %rdx
movl $2, %esi
movq %rbp, %rdi
movl $0, %eax
call __fprintf_chk@PLT
addq $1, %rbx
cmpl %ebx, 220(%rsp)
jg .L42
.L41:
movq %rbp, %rdi
call fclose@PLT
movq 80(%rsp), %rax
movq 72(%rsp), %rdx
subq %rdx, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
divsd .LC11(%rip), %xmm0
mulsd .LC12(%rip), %xmm0
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 8(%rsp), %rdi
call free@PLT
movq 24(%rsp), %rdi
call free@PLT
movq 32(%rsp), %rdi
call free@PLT
movq 16(%rsp), %rdi
call free@PLT
movq 40(%rsp), %rdi
call free@PLT
movq 48(%rsp), %rdi
call free@PLT
movq %r15, %rdi
call free@PLT
movq 160(%rsp), %rdi
call cudaFree@PLT
movq 168(%rsp), %rdi
call cudaFree@PLT
movq 152(%rsp), %rdi
call cudaFree@PLT
movq 176(%rsp), %rdi
call cudaFree@PLT
movq 192(%rsp), %rdi
call cudaFree@PLT
movq 200(%rsp), %rdi
call cudaFree@PLT
movq 184(%rsp), %rdi
call cudaFree@PLT
movl $0, %eax
jmp .L35
.L48:
subq $8, %rsp
.cfi_def_cfa_offset 312
pushq 208(%rsp)
.cfi_def_cfa_offset 320
pushq 208(%rsp)
.cfi_def_cfa_offset 328
pushq 208(%rsp)
.cfi_def_cfa_offset 336
movq 208(%rsp), %r9
movq 200(%rsp), %r8
movq 192(%rsp), %rcx
movq 184(%rsp), %rdx
movl %r13d, %esi
movl $4480, %edi
call _Z55__device_stub__Z21global_queuing_kerneliiPiS_S_S_S_S_S_iiPiS_S_S_S_S_S_
addq $32, %rsp
.cfi_def_cfa_offset 304
jmp .L38
.L47:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2060:
.size main, .-main
.section .rodata.str1.8
.align 8
.LC14:
.string "_Z21global_queuing_kerneliiPiS_S_S_S_S_S_"
.section .rodata.str1.1
.LC15:
.string "globalQueue"
.LC16:
.string "numNextLevelNodes"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2088:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC14(%rip), %rdx
movq %rdx, %rcx
leaq _Z21global_queuing_kerneliiPiS_S_S_S_S_S_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $28000000, %r9d
movl $0, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _ZL11globalQueue(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $4, %r9d
movl $0, %r8d
leaq .LC16(%rip), %rdx
movq %rdx, %rcx
leaq _ZL17numNextLevelNodes(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL17numNextLevelNodes
.comm _ZL17numNextLevelNodes,4,4
.local _ZL11globalQueue
.comm _ZL11globalQueue,28000000,32
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC11:
.long 0
.long 1093567616
.align 8
.LC12:
.long 0
.long 1083129856
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
// method to read the first, second and fourth input files
// Reads input files 1, 2 and 4: an int count N followed by N
// whitespace-separated ints. On success *input points to a malloc'd array of
// N ints (caller frees) and N is returned. On open/parse/allocation failure a
// diagnostic goes to stderr and the fprintf return value is returned, matching
// the file's existing error convention.
// Fix vs. original: fscanf and malloc results are now checked — previously a
// malformed file left file_length uninitialized (undefined behavior).
int read_input_one_two_four(int** input, char* filepath) {
    FILE* fp = fopen(filepath, "r");
    if (!fp) return fprintf(stderr, "Couldn't open file for reading\n");
    int file_length;
    if (fscanf(fp, "%d", &file_length) != 1 || file_length < 0) {
        fclose(fp);
        return fprintf(stderr, "Couldn't read element count\n");
    }
    *input = (int*) malloc(file_length * sizeof(int));
    if (*input == NULL) {
        fclose(fp);
        return fprintf(stderr, "Couldn't allocate input buffer\n");
    }
    for (int i = 0; i < file_length; i++) {
        int next_int;
        if (fscanf(fp, "%d", &next_int) != 1) {
            free(*input);
            *input = NULL;
            fclose(fp);
            return fprintf(stderr, "Couldn't read element %d\n", i);
        }
        (*input)[i] = next_int;
    }
    fclose(fp);
    return file_length;
}
// method to read the third input file
// Reads input file 3: an int count N followed by N lines of "a, b, c, d".
// Allocates four malloc'd arrays of N ints (visited/gate/input/output; caller
// frees all four) and returns N. On open/parse/allocation failure a diagnostic
// goes to stderr and the fprintf return value is returned, matching the file's
// existing error convention.
// Fix vs. original: fscanf and malloc results are now checked — previously a
// short or malformed file left the length/values uninitialized (undefined
// behavior), and an allocation failure was dereferenced.
int read_input_three(int** input1, int** input2, int** input3, int** input4, char* filepath) {
    FILE* fp = fopen(filepath, "r");
    if (!fp) return fprintf(stderr, "Couldn't open file for reading\n");
    int file_length;
    if (fscanf(fp, "%d", &file_length) != 1 || file_length < 0) {
        fclose(fp);
        return fprintf(stderr, "Couldn't read element count\n");
    }
    *input1 = (int*) malloc(file_length * sizeof(int));
    *input2 = (int*) malloc(file_length * sizeof(int));
    *input3 = (int*) malloc(file_length * sizeof(int));
    *input4 = (int*) malloc(file_length * sizeof(int));
    if (!*input1 || !*input2 || !*input3 || !*input4) {
        free(*input1); free(*input2); free(*input3); free(*input4);
        *input1 = *input2 = *input3 = *input4 = NULL;
        fclose(fp);
        return fprintf(stderr, "Couldn't allocate input buffers\n");
    }
    for (int i = 0; i < file_length; i++) {
        int next_int1;
        int next_int2;
        int next_int3;
        int next_int4;
        if (fscanf(fp, "%d, %d, %d, %d", &next_int1, &next_int2, &next_int3, &next_int4) != 4) {
            fclose(fp);
            return fprintf(stderr, "Couldn't parse line %d\n", i);
        }
        (*input1)[i] = next_int1;
        (*input2)[i] = next_int2;
        (*input3)[i] = next_int3;
        (*input4)[i] = next_int4;
    }
    fclose(fp);
    return file_length;
}
__device__ int globalQueue[7000000];
__device__ int numNextLevelNodes = 0;
// BFS-style level expansion into a single device-global queue.
// Each thread owns a contiguous chunk of currLevelNodes; for every neighbor it
// wins (first atomicExch on the visited flag), it evaluates the gate table,
// stores the neighbor's new output, and appends the neighbor to globalQueue,
// bumping numNextLevelNodes atomically.
//
// Fixes vs. the original:
//  * nodesPerThread used floor division, so the numNodes % totalThreads tail
//    nodes were never processed, and NO nodes were processed whenever
//    numNodes < totalThreads. Ceil division covers every node exactly once.
//  * The trailing __syncthreads() sat inside a loop whose trip count differs
//    per thread (divergent barrier => undefined behavior). The kernel uses no
//    shared memory, so the barrier is removed.
__global__ void global_queuing_kernel(int totalThreads, int numNodes, int* nodePtrs, int* currLevelNodes, int* nodeNeighbors, int* nodeVisited, int* nodeGate, int* nodeInput, int* nodeOutput) {
    if (totalThreads <= 0) return; // guard against a zero divisor
    // Ceil division: every index in [0, numNodes) belongs to exactly one thread.
    int nodesPerThread = (numNodes + totalThreads - 1) / totalThreads;
    int threadIndex = threadIdx.x + (blockDim.x * blockIdx.x);
    int beginIndex = threadIndex * nodesPerThread;
    // Loop over this thread's slice of the current level
    for (int index = beginIndex; index < numNodes && index < beginIndex + nodesPerThread; index++) {
        int nodeIndex = currLevelNodes[index];
        // CSR-style adjacency: neighbors of nodeIndex live in
        // nodeNeighbors[nodePtrs[nodeIndex] .. nodePtrs[nodeIndex+1])
        for (int secondIndex = nodePtrs[nodeIndex]; secondIndex < nodePtrs[nodeIndex + 1]; secondIndex++) {
            int neighborIndex = nodeNeighbors[secondIndex];
            // atomicExch marks the neighbor visited and reports whether this
            // thread won the race; only the winner enqueues it.
            const int alreadyVisited = atomicExch(&(nodeVisited[neighborIndex]), 1);
            if (!alreadyVisited) {
                int result = 0;
                int nInputV = nodeInput[neighborIndex];
                // NOTE(review): reads the *current* node's output but the
                // *neighbor's* input and gate — preserved as-is; confirm
                // against the assignment spec.
                int nOutputV = nodeOutput[nodeIndex];
                int nGateV = nodeGate[neighborIndex];
                switch (nGateV) {
                case 0: // 1 only when both values are 1 (AND over {0,1})
                    result = (nInputV == 1 && nOutputV == 1) ? 1 : 0;
                    break;
                case 1: // 0 only when both values are 0 (OR over {0,1})
                    result = (nInputV == 0 && nOutputV == 0) ? 0 : 1;
                    break;
                case 2: // complement of case 0 (NAND over {0,1})
                    result = (nInputV == 1 && nOutputV == 1) ? 0 : 1;
                    break;
                case 3: // complement of case 1 (NOR over {0,1})
                    result = (nInputV == 0 && nOutputV == 0) ? 1 : 0;
                    break;
                case 4: // 1 when the values differ (XOR)
                    result = (nInputV == nOutputV) ? 0 : 1;
                    break;
                case 5: // 1 when the values match (XNOR)
                    result = (nInputV == nOutputV) ? 1 : 0;
                    break;
                }
                // Update the neighbor's output
                nodeOutput[neighborIndex] = result;
                // Reserve a unique slot in the global next-level queue
                int globalQueueIndex = atomicAdd(&numNextLevelNodes, 1);
                globalQueue[globalQueueIndex] = neighborIndex;
            }
        }
    }
}
// Reports a failed CUDA runtime call on stderr, tagging the message with the
// caller-supplied context string. The status is returned unchanged so the
// helper can wrap calls inline.
inline cudaError_t checkCudaErr(cudaError_t err, const char* msg) {
    if (err == cudaSuccess) {
        return err;
    }
    fprintf(stderr, "Error at runtime %s: %s\n", msg, cudaGetErrorString(err));
    return err;
}
// Driver: reads the graph/gate inputs named on the command line, runs one
// level of the global-queuing BFS kernel, and writes the node outputs and the
// next-level queue to the two output files (argv[5], argv[6]).
// Fixes vs. the original:
//  * Seven host malloc()s were immediately overwritten by cudaMalloc on the
//    same pointer, leaking all seven host allocations — removed.
//  * end = clock() was sampled right after the asynchronous launch, so the
//    printed "Execution time" covered only launch overhead; it is now sampled
//    after cudaDeviceSynchronize().
//  * nextLevelNodes_h and outputBuffer were never freed — now freed.
//  * A failed fopen() on either output file dereferenced a NULL FILE* — now
//    checked.
int main(int argc, char *argv[]){
    if (argc < 7) {
        return fprintf(stderr, "Missing input argument(s)!\n");
    }
    // Host-side inputs
    int numNodePtrs;
    int numNodes;
    int *nodePtrs_h;
    int *nodeNeighbors_h;
    int *nodeVisited_h;
    int numTotalNeighbors_h;
    int *currLevelNodes_h;
    int numCurrLevelNodes;
    int numNextLevelNodes_h;
    int *nodeGate_h;
    int *nodeInput_h;
    int *nodeOutput_h;
    numNodePtrs = read_input_one_two_four(&nodePtrs_h, argv[1]);
    numTotalNeighbors_h = read_input_one_two_four(&nodeNeighbors_h, argv[2]);
    numNodes = read_input_three(&nodeVisited_h, &nodeGate_h, &nodeInput_h, &nodeOutput_h,argv[3]);
    numCurrLevelNodes = read_input_one_two_four(&currLevelNodes_h, argv[4]);
    char* nodeOutput_fileName = argv[5];
    char* nextLevelNodes_fileName = argv[6];
    // Host buffer that receives the device-side globalQueue after the launch
    int *nextLevelNodes_h = (int *)malloc(numNodes*sizeof(int));
    checkCudaErr(cudaMemcpyToSymbol(globalQueue,nextLevelNodes_h, numNodes * sizeof(int)), "Copying");
    int numNodesSize = numNodes * sizeof(int);
    int numBlocks = 35;
    int blockSize = 128;
    // Device buffers (no host malloc here — see leak fix above)
    int* nodePtrs_cuda;
    cudaMalloc (&nodePtrs_cuda, numNodePtrs * sizeof(int));
    cudaMemcpy(nodePtrs_cuda, nodePtrs_h, numNodePtrs * sizeof(int), cudaMemcpyHostToDevice);
    int* currLevelNodes_cuda;
    cudaMalloc (&currLevelNodes_cuda, numCurrLevelNodes * sizeof(int));
    cudaMemcpy(currLevelNodes_cuda, currLevelNodes_h, numCurrLevelNodes * sizeof(int), cudaMemcpyHostToDevice);
    int* nodeNeighbors_cuda;
    cudaMalloc (&nodeNeighbors_cuda, numTotalNeighbors_h * sizeof(int));
    cudaMemcpy(nodeNeighbors_cuda, nodeNeighbors_h, numTotalNeighbors_h * sizeof(int), cudaMemcpyHostToDevice);
    int* nodeVisited_cuda;
    cudaMalloc (&nodeVisited_cuda, numNodesSize);
    cudaMemcpy(nodeVisited_cuda, nodeVisited_h,numNodesSize, cudaMemcpyHostToDevice);
    int* nodeGate_cuda;
    cudaMalloc (&nodeGate_cuda, numNodesSize);
    cudaMemcpy(nodeGate_cuda, nodeGate_h, numNodesSize, cudaMemcpyHostToDevice);
    int* nodeInput_cuda;
    cudaMalloc (&nodeInput_cuda, numNodesSize);
    cudaMemcpy(nodeInput_cuda, nodeInput_h, numNodesSize, cudaMemcpyHostToDevice);
    int* nodeOutput_cuda;
    cudaMalloc (&nodeOutput_cuda, numNodesSize);
    cudaMemcpy(nodeOutput_cuda, nodeOutput_h, numNodesSize, cudaMemcpyHostToDevice);
    clock_t start = clock();
    // kernel call
    global_queuing_kernel <<< numBlocks, blockSize >>> (blockSize * numBlocks, numNodes, nodePtrs_cuda, currLevelNodes_cuda, nodeNeighbors_cuda, nodeVisited_cuda, nodeGate_cuda, nodeInput_cuda, nodeOutput_cuda);
    checkCudaErr(cudaDeviceSynchronize(), "Synchronization");
    // Stop the clock only after the kernel has actually finished (timing fix)
    clock_t end = clock();
    checkCudaErr(cudaGetLastError(), "GPU");
    cudaMemcpyFromSymbol(&numNextLevelNodes_h, numNextLevelNodes, sizeof(int), 0, cudaMemcpyDeviceToHost);
    checkCudaErr(cudaMemcpyFromSymbol(nextLevelNodes_h,globalQueue, numNodesSize), "Copying");
    int *outputBuffer = (int*)malloc( numNodesSize);
    checkCudaErr(cudaMemcpy(outputBuffer, nodeOutput_cuda, numNodesSize, cudaMemcpyDeviceToHost), "Copying");
    // write node output file
    FILE *nodeOutputFile = fopen(nodeOutput_fileName, "w");
    if (!nodeOutputFile) return fprintf(stderr, "Couldn't open file for writing\n");
    fprintf(nodeOutputFile,"%d\n",numNodes);
    for (int counter = 0; counter < numNodes; counter++) {
        fprintf(nodeOutputFile,"%d\n",(outputBuffer[counter]));
    }
    fclose(nodeOutputFile);
    // write next level output file
    FILE *nextLevelOutputFile = fopen(nextLevelNodes_fileName, "w");
    if (!nextLevelOutputFile) return fprintf(stderr, "Couldn't open file for writing\n");
    fprintf(nextLevelOutputFile,"%d\n",numNextLevelNodes_h);
    for (int counter = 0; counter < numNextLevelNodes_h; counter++) {
        fprintf(nextLevelOutputFile,"%d\n",(nextLevelNodes_h[counter]));
    }
    fclose(nextLevelOutputFile);
    double runtime = (double) (end - start) / CLOCKS_PER_SEC * 1000;
    printf("Execution time: %f ms\n", runtime);
    // free host buffers
    free(nodePtrs_h);
    free(nodeNeighbors_h);
    free(nodeVisited_h);
    free(currLevelNodes_h);
    free(nodeGate_h);
    free(nodeInput_h);
    free(nodeOutput_h);
    free(nextLevelNodes_h);  // leak fix
    free(outputBuffer);      // leak fix
    // free device buffers
    cudaFree(currLevelNodes_cuda);
    cudaFree(nodeNeighbors_cuda);
    cudaFree(nodePtrs_cuda);
    cudaFree(nodeVisited_cuda);
    cudaFree(nodeInput_cuda);
    cudaFree(nodeOutput_cuda);
    cudaFree(nodeGate_cuda);
    return 0;
}
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
// method to read the first, second and fourth input files
// Reads input files 1, 2 and 4: an int count N followed by N
// whitespace-separated ints. On success *input points to a malloc'd array of
// N ints (caller frees) and N is returned. On open/parse/allocation failure a
// diagnostic goes to stderr and the fprintf return value is returned, matching
// the file's existing error convention.
// Fix vs. original: fscanf and malloc results are now checked — previously a
// malformed file left file_length uninitialized (undefined behavior).
int read_input_one_two_four(int** input, char* filepath) {
    FILE* fp = fopen(filepath, "r");
    if (!fp) return fprintf(stderr, "Couldn't open file for reading\n");
    int file_length;
    if (fscanf(fp, "%d", &file_length) != 1 || file_length < 0) {
        fclose(fp);
        return fprintf(stderr, "Couldn't read element count\n");
    }
    *input = (int*) malloc(file_length * sizeof(int));
    if (*input == NULL) {
        fclose(fp);
        return fprintf(stderr, "Couldn't allocate input buffer\n");
    }
    for (int i = 0; i < file_length; i++) {
        int next_int;
        if (fscanf(fp, "%d", &next_int) != 1) {
            free(*input);
            *input = NULL;
            fclose(fp);
            return fprintf(stderr, "Couldn't read element %d\n", i);
        }
        (*input)[i] = next_int;
    }
    fclose(fp);
    return file_length;
}
// method to read the third input file
// Reads input file 3: an int count N followed by N lines of "a, b, c, d".
// Allocates four malloc'd arrays of N ints (visited/gate/input/output; caller
// frees all four) and returns N. On open/parse/allocation failure a diagnostic
// goes to stderr and the fprintf return value is returned, matching the file's
// existing error convention.
// Fix vs. original: fscanf and malloc results are now checked — previously a
// short or malformed file left the length/values uninitialized (undefined
// behavior), and an allocation failure was dereferenced.
int read_input_three(int** input1, int** input2, int** input3, int** input4, char* filepath) {
    FILE* fp = fopen(filepath, "r");
    if (!fp) return fprintf(stderr, "Couldn't open file for reading\n");
    int file_length;
    if (fscanf(fp, "%d", &file_length) != 1 || file_length < 0) {
        fclose(fp);
        return fprintf(stderr, "Couldn't read element count\n");
    }
    *input1 = (int*) malloc(file_length * sizeof(int));
    *input2 = (int*) malloc(file_length * sizeof(int));
    *input3 = (int*) malloc(file_length * sizeof(int));
    *input4 = (int*) malloc(file_length * sizeof(int));
    if (!*input1 || !*input2 || !*input3 || !*input4) {
        free(*input1); free(*input2); free(*input3); free(*input4);
        *input1 = *input2 = *input3 = *input4 = NULL;
        fclose(fp);
        return fprintf(stderr, "Couldn't allocate input buffers\n");
    }
    for (int i = 0; i < file_length; i++) {
        int next_int1;
        int next_int2;
        int next_int3;
        int next_int4;
        if (fscanf(fp, "%d, %d, %d, %d", &next_int1, &next_int2, &next_int3, &next_int4) != 4) {
            fclose(fp);
            return fprintf(stderr, "Couldn't parse line %d\n", i);
        }
        (*input1)[i] = next_int1;
        (*input2)[i] = next_int2;
        (*input3)[i] = next_int3;
        (*input4)[i] = next_int4;
    }
    fclose(fp);
    return file_length;
}
__device__ int globalQueue[7000000];
__device__ int numNextLevelNodes = 0;
// BFS-style level expansion into a single device-global queue.
// Each thread owns a contiguous chunk of currLevelNodes; for every neighbor it
// wins (first atomicExch on the visited flag), it evaluates the gate table,
// stores the neighbor's new output, and appends the neighbor to globalQueue,
// bumping numNextLevelNodes atomically.
//
// Fixes vs. the original:
//  * nodesPerThread used floor division, so the numNodes % totalThreads tail
//    nodes were never processed, and NO nodes were processed whenever
//    numNodes < totalThreads. Ceil division covers every node exactly once.
//  * The trailing __syncthreads() sat inside a loop whose trip count differs
//    per thread (divergent barrier => undefined behavior). The kernel uses no
//    shared memory, so the barrier is removed.
__global__ void global_queuing_kernel(int totalThreads, int numNodes, int* nodePtrs, int* currLevelNodes, int* nodeNeighbors, int* nodeVisited, int* nodeGate, int* nodeInput, int* nodeOutput) {
    if (totalThreads <= 0) return; // guard against a zero divisor
    // Ceil division: every index in [0, numNodes) belongs to exactly one thread.
    int nodesPerThread = (numNodes + totalThreads - 1) / totalThreads;
    int threadIndex = threadIdx.x + (blockDim.x * blockIdx.x);
    int beginIndex = threadIndex * nodesPerThread;
    // Loop over this thread's slice of the current level
    for (int index = beginIndex; index < numNodes && index < beginIndex + nodesPerThread; index++) {
        int nodeIndex = currLevelNodes[index];
        // CSR-style adjacency: neighbors of nodeIndex live in
        // nodeNeighbors[nodePtrs[nodeIndex] .. nodePtrs[nodeIndex+1])
        for (int secondIndex = nodePtrs[nodeIndex]; secondIndex < nodePtrs[nodeIndex + 1]; secondIndex++) {
            int neighborIndex = nodeNeighbors[secondIndex];
            // atomicExch marks the neighbor visited and reports whether this
            // thread won the race; only the winner enqueues it.
            const int alreadyVisited = atomicExch(&(nodeVisited[neighborIndex]), 1);
            if (!alreadyVisited) {
                int result = 0;
                int nInputV = nodeInput[neighborIndex];
                // NOTE(review): reads the *current* node's output but the
                // *neighbor's* input and gate — preserved as-is; confirm
                // against the assignment spec.
                int nOutputV = nodeOutput[nodeIndex];
                int nGateV = nodeGate[neighborIndex];
                switch (nGateV) {
                case 0: // 1 only when both values are 1 (AND over {0,1})
                    result = (nInputV == 1 && nOutputV == 1) ? 1 : 0;
                    break;
                case 1: // 0 only when both values are 0 (OR over {0,1})
                    result = (nInputV == 0 && nOutputV == 0) ? 0 : 1;
                    break;
                case 2: // complement of case 0 (NAND over {0,1})
                    result = (nInputV == 1 && nOutputV == 1) ? 0 : 1;
                    break;
                case 3: // complement of case 1 (NOR over {0,1})
                    result = (nInputV == 0 && nOutputV == 0) ? 1 : 0;
                    break;
                case 4: // 1 when the values differ (XOR)
                    result = (nInputV == nOutputV) ? 0 : 1;
                    break;
                case 5: // 1 when the values match (XNOR)
                    result = (nInputV == nOutputV) ? 1 : 0;
                    break;
                }
                // Update the neighbor's output
                nodeOutput[neighborIndex] = result;
                // Reserve a unique slot in the global next-level queue
                int globalQueueIndex = atomicAdd(&numNextLevelNodes, 1);
                globalQueue[globalQueueIndex] = neighborIndex;
            }
        }
    }
}
// Reports a failed HIP runtime call on stderr, tagging the message with the
// caller-supplied context string. The status is returned unchanged so the
// helper can wrap calls inline.
inline hipError_t checkCudaErr(hipError_t err, const char* msg) {
    if (err == hipSuccess) {
        return err;
    }
    fprintf(stderr, "Error at runtime %s: %s\n", msg, hipGetErrorString(err));
    return err;
}
// Driver: reads the graph/gate inputs named on the command line, runs one
// level of the global-queuing BFS kernel, and writes the node outputs and the
// next-level queue to the two output files (argv[5], argv[6]).
// Fixes vs. the original:
//  * Seven host malloc()s were immediately overwritten by hipMalloc on the
//    same pointer, leaking all seven host allocations — removed.
//  * end = clock() was sampled right after the asynchronous launch, so the
//    printed "Execution time" covered only launch overhead; it is now sampled
//    after hipDeviceSynchronize().
//  * nextLevelNodes_h and outputBuffer were never freed — now freed.
//  * A failed fopen() on either output file dereferenced a NULL FILE* — now
//    checked.
int main(int argc, char *argv[]){
    if (argc < 7) {
        return fprintf(stderr, "Missing input argument(s)!\n");
    }
    // Host-side inputs
    int numNodePtrs;
    int numNodes;
    int *nodePtrs_h;
    int *nodeNeighbors_h;
    int *nodeVisited_h;
    int numTotalNeighbors_h;
    int *currLevelNodes_h;
    int numCurrLevelNodes;
    int numNextLevelNodes_h;
    int *nodeGate_h;
    int *nodeInput_h;
    int *nodeOutput_h;
    numNodePtrs = read_input_one_two_four(&nodePtrs_h, argv[1]);
    numTotalNeighbors_h = read_input_one_two_four(&nodeNeighbors_h, argv[2]);
    numNodes = read_input_three(&nodeVisited_h, &nodeGate_h, &nodeInput_h, &nodeOutput_h,argv[3]);
    numCurrLevelNodes = read_input_one_two_four(&currLevelNodes_h, argv[4]);
    char* nodeOutput_fileName = argv[5];
    char* nextLevelNodes_fileName = argv[6];
    // Host buffer that receives the device-side globalQueue after the launch
    int *nextLevelNodes_h = (int *)malloc(numNodes*sizeof(int));
    checkCudaErr(hipMemcpyToSymbol(HIP_SYMBOL(globalQueue),nextLevelNodes_h, numNodes * sizeof(int)), "Copying");
    int numNodesSize = numNodes * sizeof(int);
    int numBlocks = 35;
    int blockSize = 128;
    // Device buffers (no host malloc here — see leak fix above)
    int* nodePtrs_cuda;
    hipMalloc (&nodePtrs_cuda, numNodePtrs * sizeof(int));
    hipMemcpy(nodePtrs_cuda, nodePtrs_h, numNodePtrs * sizeof(int), hipMemcpyHostToDevice);
    int* currLevelNodes_cuda;
    hipMalloc (&currLevelNodes_cuda, numCurrLevelNodes * sizeof(int));
    hipMemcpy(currLevelNodes_cuda, currLevelNodes_h, numCurrLevelNodes * sizeof(int), hipMemcpyHostToDevice);
    int* nodeNeighbors_cuda;
    hipMalloc (&nodeNeighbors_cuda, numTotalNeighbors_h * sizeof(int));
    hipMemcpy(nodeNeighbors_cuda, nodeNeighbors_h, numTotalNeighbors_h * sizeof(int), hipMemcpyHostToDevice);
    int* nodeVisited_cuda;
    hipMalloc (&nodeVisited_cuda, numNodesSize);
    hipMemcpy(nodeVisited_cuda, nodeVisited_h,numNodesSize, hipMemcpyHostToDevice);
    int* nodeGate_cuda;
    hipMalloc (&nodeGate_cuda, numNodesSize);
    hipMemcpy(nodeGate_cuda, nodeGate_h, numNodesSize, hipMemcpyHostToDevice);
    int* nodeInput_cuda;
    hipMalloc (&nodeInput_cuda, numNodesSize);
    hipMemcpy(nodeInput_cuda, nodeInput_h, numNodesSize, hipMemcpyHostToDevice);
    int* nodeOutput_cuda;
    hipMalloc (&nodeOutput_cuda, numNodesSize);
    hipMemcpy(nodeOutput_cuda, nodeOutput_h, numNodesSize, hipMemcpyHostToDevice);
    clock_t start = clock();
    // kernel call
    global_queuing_kernel <<< numBlocks, blockSize >>> (blockSize * numBlocks, numNodes, nodePtrs_cuda, currLevelNodes_cuda, nodeNeighbors_cuda, nodeVisited_cuda, nodeGate_cuda, nodeInput_cuda, nodeOutput_cuda);
    checkCudaErr(hipDeviceSynchronize(), "Synchronization");
    // Stop the clock only after the kernel has actually finished (timing fix)
    clock_t end = clock();
    checkCudaErr(hipGetLastError(), "GPU");
    hipMemcpyFromSymbol(&numNextLevelNodes_h, HIP_SYMBOL(numNextLevelNodes), sizeof(int), 0, hipMemcpyDeviceToHost);
    checkCudaErr(hipMemcpyFromSymbol(nextLevelNodes_h,HIP_SYMBOL(globalQueue), numNodesSize), "Copying");
    int *outputBuffer = (int*)malloc( numNodesSize);
    checkCudaErr(hipMemcpy(outputBuffer, nodeOutput_cuda, numNodesSize, hipMemcpyDeviceToHost), "Copying");
    // write node output file
    FILE *nodeOutputFile = fopen(nodeOutput_fileName, "w");
    if (!nodeOutputFile) return fprintf(stderr, "Couldn't open file for writing\n");
    fprintf(nodeOutputFile,"%d\n",numNodes);
    for (int counter = 0; counter < numNodes; counter++) {
        fprintf(nodeOutputFile,"%d\n",(outputBuffer[counter]));
    }
    fclose(nodeOutputFile);
    // write next level output file
    FILE *nextLevelOutputFile = fopen(nextLevelNodes_fileName, "w");
    if (!nextLevelOutputFile) return fprintf(stderr, "Couldn't open file for writing\n");
    fprintf(nextLevelOutputFile,"%d\n",numNextLevelNodes_h);
    for (int counter = 0; counter < numNextLevelNodes_h; counter++) {
        fprintf(nextLevelOutputFile,"%d\n",(nextLevelNodes_h[counter]));
    }
    fclose(nextLevelOutputFile);
    double runtime = (double) (end - start) / CLOCKS_PER_SEC * 1000;
    printf("Execution time: %f ms\n", runtime);
    // free host buffers
    free(nodePtrs_h);
    free(nodeNeighbors_h);
    free(nodeVisited_h);
    free(currLevelNodes_h);
    free(nodeGate_h);
    free(nodeInput_h);
    free(nodeOutput_h);
    free(nextLevelNodes_h);  // leak fix
    free(outputBuffer);      // leak fix
    // free device buffers
    hipFree(currLevelNodes_cuda);
    hipFree(nodeNeighbors_cuda);
    hipFree(nodePtrs_cuda);
    hipFree(nodeVisited_cuda);
    hipFree(nodeInput_cuda);
    hipFree(nodeOutput_cuda);
    hipFree(nodeGate_cuda);
    return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
// method to read the first, second and fourth input files
// Reads input files 1, 2 and 4: an int count N followed by N
// whitespace-separated ints. On success *input points to a malloc'd array of
// N ints (caller frees) and N is returned. On open/parse/allocation failure a
// diagnostic goes to stderr and the fprintf return value is returned, matching
// the file's existing error convention.
// Fix vs. original: fscanf and malloc results are now checked — previously a
// malformed file left file_length uninitialized (undefined behavior).
int read_input_one_two_four(int** input, char* filepath) {
    FILE* fp = fopen(filepath, "r");
    if (!fp) return fprintf(stderr, "Couldn't open file for reading\n");
    int file_length;
    if (fscanf(fp, "%d", &file_length) != 1 || file_length < 0) {
        fclose(fp);
        return fprintf(stderr, "Couldn't read element count\n");
    }
    *input = (int*) malloc(file_length * sizeof(int));
    if (*input == NULL) {
        fclose(fp);
        return fprintf(stderr, "Couldn't allocate input buffer\n");
    }
    for (int i = 0; i < file_length; i++) {
        int next_int;
        if (fscanf(fp, "%d", &next_int) != 1) {
            free(*input);
            *input = NULL;
            fclose(fp);
            return fprintf(stderr, "Couldn't read element %d\n", i);
        }
        (*input)[i] = next_int;
    }
    fclose(fp);
    return file_length;
}
// method to read the third input file
// Reads input file 3: an int count N followed by N lines of "a, b, c, d".
// Allocates four malloc'd arrays of N ints (visited/gate/input/output; caller
// frees all four) and returns N. On open/parse/allocation failure a diagnostic
// goes to stderr and the fprintf return value is returned, matching the file's
// existing error convention.
// Fix vs. original: fscanf and malloc results are now checked — previously a
// short or malformed file left the length/values uninitialized (undefined
// behavior), and an allocation failure was dereferenced.
int read_input_three(int** input1, int** input2, int** input3, int** input4, char* filepath) {
    FILE* fp = fopen(filepath, "r");
    if (!fp) return fprintf(stderr, "Couldn't open file for reading\n");
    int file_length;
    if (fscanf(fp, "%d", &file_length) != 1 || file_length < 0) {
        fclose(fp);
        return fprintf(stderr, "Couldn't read element count\n");
    }
    *input1 = (int*) malloc(file_length * sizeof(int));
    *input2 = (int*) malloc(file_length * sizeof(int));
    *input3 = (int*) malloc(file_length * sizeof(int));
    *input4 = (int*) malloc(file_length * sizeof(int));
    if (!*input1 || !*input2 || !*input3 || !*input4) {
        free(*input1); free(*input2); free(*input3); free(*input4);
        *input1 = *input2 = *input3 = *input4 = NULL;
        fclose(fp);
        return fprintf(stderr, "Couldn't allocate input buffers\n");
    }
    for (int i = 0; i < file_length; i++) {
        int next_int1;
        int next_int2;
        int next_int3;
        int next_int4;
        if (fscanf(fp, "%d, %d, %d, %d", &next_int1, &next_int2, &next_int3, &next_int4) != 4) {
            fclose(fp);
            return fprintf(stderr, "Couldn't parse line %d\n", i);
        }
        (*input1)[i] = next_int1;
        (*input2)[i] = next_int2;
        (*input3)[i] = next_int3;
        (*input4)[i] = next_int4;
    }
    fclose(fp);
    return file_length;
}
__device__ int globalQueue[7000000];
__device__ int numNextLevelNodes = 0;
// BFS-style level expansion into a single device-global queue.
// Each thread owns a contiguous chunk of currLevelNodes; for every neighbor it
// wins (first atomicExch on the visited flag), it evaluates the gate table,
// stores the neighbor's new output, and appends the neighbor to globalQueue,
// bumping numNextLevelNodes atomically.
//
// Fixes vs. the original:
//  * nodesPerThread used floor division, so the numNodes % totalThreads tail
//    nodes were never processed, and NO nodes were processed whenever
//    numNodes < totalThreads. Ceil division covers every node exactly once.
//  * The trailing __syncthreads() sat inside a loop whose trip count differs
//    per thread (divergent barrier => undefined behavior). The kernel uses no
//    shared memory, so the barrier is removed.
__global__ void global_queuing_kernel(int totalThreads, int numNodes, int* nodePtrs, int* currLevelNodes, int* nodeNeighbors, int* nodeVisited, int* nodeGate, int* nodeInput, int* nodeOutput) {
    if (totalThreads <= 0) return; // guard against a zero divisor
    // Ceil division: every index in [0, numNodes) belongs to exactly one thread.
    int nodesPerThread = (numNodes + totalThreads - 1) / totalThreads;
    int threadIndex = threadIdx.x + (blockDim.x * blockIdx.x);
    int beginIndex = threadIndex * nodesPerThread;
    // Loop over this thread's slice of the current level
    for (int index = beginIndex; index < numNodes && index < beginIndex + nodesPerThread; index++) {
        int nodeIndex = currLevelNodes[index];
        // CSR-style adjacency: neighbors of nodeIndex live in
        // nodeNeighbors[nodePtrs[nodeIndex] .. nodePtrs[nodeIndex+1])
        for (int secondIndex = nodePtrs[nodeIndex]; secondIndex < nodePtrs[nodeIndex + 1]; secondIndex++) {
            int neighborIndex = nodeNeighbors[secondIndex];
            // atomicExch marks the neighbor visited and reports whether this
            // thread won the race; only the winner enqueues it.
            const int alreadyVisited = atomicExch(&(nodeVisited[neighborIndex]), 1);
            if (!alreadyVisited) {
                int result = 0;
                int nInputV = nodeInput[neighborIndex];
                // NOTE(review): reads the *current* node's output but the
                // *neighbor's* input and gate — preserved as-is; confirm
                // against the assignment spec.
                int nOutputV = nodeOutput[nodeIndex];
                int nGateV = nodeGate[neighborIndex];
                switch (nGateV) {
                case 0: // 1 only when both values are 1 (AND over {0,1})
                    result = (nInputV == 1 && nOutputV == 1) ? 1 : 0;
                    break;
                case 1: // 0 only when both values are 0 (OR over {0,1})
                    result = (nInputV == 0 && nOutputV == 0) ? 0 : 1;
                    break;
                case 2: // complement of case 0 (NAND over {0,1})
                    result = (nInputV == 1 && nOutputV == 1) ? 0 : 1;
                    break;
                case 3: // complement of case 1 (NOR over {0,1})
                    result = (nInputV == 0 && nOutputV == 0) ? 1 : 0;
                    break;
                case 4: // 1 when the values differ (XOR)
                    result = (nInputV == nOutputV) ? 0 : 1;
                    break;
                case 5: // 1 when the values match (XNOR)
                    result = (nInputV == nOutputV) ? 1 : 0;
                    break;
                }
                // Update the neighbor's output
                nodeOutput[neighborIndex] = result;
                // Reserve a unique slot in the global next-level queue
                int globalQueueIndex = atomicAdd(&numNextLevelNodes, 1);
                globalQueue[globalQueueIndex] = neighborIndex;
            }
        }
    }
}
// Reports a failed HIP runtime call on stderr, tagging the message with the
// caller-supplied context string. The status is returned unchanged so the
// helper can wrap calls inline.
inline hipError_t checkCudaErr(hipError_t err, const char* msg) {
    if (err == hipSuccess) {
        return err;
    }
    fprintf(stderr, "Error at runtime %s: %s\n", msg, hipGetErrorString(err));
    return err;
}
// Driver: reads the graph/gate inputs named on the command line, runs one
// level of the global-queuing BFS kernel, and writes the node outputs and the
// next-level queue to the two output files (argv[5], argv[6]).
// Fixes vs. the original:
//  * Seven host malloc()s were immediately overwritten by hipMalloc on the
//    same pointer, leaking all seven host allocations — removed.
//  * end = clock() was sampled right after the asynchronous launch, so the
//    printed "Execution time" covered only launch overhead; it is now sampled
//    after hipDeviceSynchronize().
//  * nextLevelNodes_h and outputBuffer were never freed — now freed.
//  * A failed fopen() on either output file dereferenced a NULL FILE* — now
//    checked.
int main(int argc, char *argv[]){
    if (argc < 7) {
        return fprintf(stderr, "Missing input argument(s)!\n");
    }
    // Host-side inputs
    int numNodePtrs;
    int numNodes;
    int *nodePtrs_h;
    int *nodeNeighbors_h;
    int *nodeVisited_h;
    int numTotalNeighbors_h;
    int *currLevelNodes_h;
    int numCurrLevelNodes;
    int numNextLevelNodes_h;
    int *nodeGate_h;
    int *nodeInput_h;
    int *nodeOutput_h;
    numNodePtrs = read_input_one_two_four(&nodePtrs_h, argv[1]);
    numTotalNeighbors_h = read_input_one_two_four(&nodeNeighbors_h, argv[2]);
    numNodes = read_input_three(&nodeVisited_h, &nodeGate_h, &nodeInput_h, &nodeOutput_h,argv[3]);
    numCurrLevelNodes = read_input_one_two_four(&currLevelNodes_h, argv[4]);
    char* nodeOutput_fileName = argv[5];
    char* nextLevelNodes_fileName = argv[6];
    // Host buffer that receives the device-side globalQueue after the launch
    int *nextLevelNodes_h = (int *)malloc(numNodes*sizeof(int));
    checkCudaErr(hipMemcpyToSymbol(HIP_SYMBOL(globalQueue),nextLevelNodes_h, numNodes * sizeof(int)), "Copying");
    int numNodesSize = numNodes * sizeof(int);
    int numBlocks = 35;
    int blockSize = 128;
    // Device buffers (no host malloc here — see leak fix above)
    int* nodePtrs_cuda;
    hipMalloc (&nodePtrs_cuda, numNodePtrs * sizeof(int));
    hipMemcpy(nodePtrs_cuda, nodePtrs_h, numNodePtrs * sizeof(int), hipMemcpyHostToDevice);
    int* currLevelNodes_cuda;
    hipMalloc (&currLevelNodes_cuda, numCurrLevelNodes * sizeof(int));
    hipMemcpy(currLevelNodes_cuda, currLevelNodes_h, numCurrLevelNodes * sizeof(int), hipMemcpyHostToDevice);
    int* nodeNeighbors_cuda;
    hipMalloc (&nodeNeighbors_cuda, numTotalNeighbors_h * sizeof(int));
    hipMemcpy(nodeNeighbors_cuda, nodeNeighbors_h, numTotalNeighbors_h * sizeof(int), hipMemcpyHostToDevice);
    int* nodeVisited_cuda;
    hipMalloc (&nodeVisited_cuda, numNodesSize);
    hipMemcpy(nodeVisited_cuda, nodeVisited_h,numNodesSize, hipMemcpyHostToDevice);
    int* nodeGate_cuda;
    hipMalloc (&nodeGate_cuda, numNodesSize);
    hipMemcpy(nodeGate_cuda, nodeGate_h, numNodesSize, hipMemcpyHostToDevice);
    int* nodeInput_cuda;
    hipMalloc (&nodeInput_cuda, numNodesSize);
    hipMemcpy(nodeInput_cuda, nodeInput_h, numNodesSize, hipMemcpyHostToDevice);
    int* nodeOutput_cuda;
    hipMalloc (&nodeOutput_cuda, numNodesSize);
    hipMemcpy(nodeOutput_cuda, nodeOutput_h, numNodesSize, hipMemcpyHostToDevice);
    clock_t start = clock();
    // kernel call
    global_queuing_kernel <<< numBlocks, blockSize >>> (blockSize * numBlocks, numNodes, nodePtrs_cuda, currLevelNodes_cuda, nodeNeighbors_cuda, nodeVisited_cuda, nodeGate_cuda, nodeInput_cuda, nodeOutput_cuda);
    checkCudaErr(hipDeviceSynchronize(), "Synchronization");
    // Stop the clock only after the kernel has actually finished (timing fix)
    clock_t end = clock();
    checkCudaErr(hipGetLastError(), "GPU");
    hipMemcpyFromSymbol(&numNextLevelNodes_h, HIP_SYMBOL(numNextLevelNodes), sizeof(int), 0, hipMemcpyDeviceToHost);
    checkCudaErr(hipMemcpyFromSymbol(nextLevelNodes_h,HIP_SYMBOL(globalQueue), numNodesSize), "Copying");
    int *outputBuffer = (int*)malloc( numNodesSize);
    checkCudaErr(hipMemcpy(outputBuffer, nodeOutput_cuda, numNodesSize, hipMemcpyDeviceToHost), "Copying");
    // write node output file
    FILE *nodeOutputFile = fopen(nodeOutput_fileName, "w");
    if (!nodeOutputFile) return fprintf(stderr, "Couldn't open file for writing\n");
    fprintf(nodeOutputFile,"%d\n",numNodes);
    for (int counter = 0; counter < numNodes; counter++) {
        fprintf(nodeOutputFile,"%d\n",(outputBuffer[counter]));
    }
    fclose(nodeOutputFile);
    // write next level output file
    FILE *nextLevelOutputFile = fopen(nextLevelNodes_fileName, "w");
    if (!nextLevelOutputFile) return fprintf(stderr, "Couldn't open file for writing\n");
    fprintf(nextLevelOutputFile,"%d\n",numNextLevelNodes_h);
    for (int counter = 0; counter < numNextLevelNodes_h; counter++) {
        fprintf(nextLevelOutputFile,"%d\n",(nextLevelNodes_h[counter]));
    }
    fclose(nextLevelOutputFile);
    double runtime = (double) (end - start) / CLOCKS_PER_SEC * 1000;
    printf("Execution time: %f ms\n", runtime);
    // free host buffers
    free(nodePtrs_h);
    free(nodeNeighbors_h);
    free(nodeVisited_h);
    free(currLevelNodes_h);
    free(nodeGate_h);
    free(nodeInput_h);
    free(nodeOutput_h);
    free(nextLevelNodes_h);  // leak fix
    free(outputBuffer);      // leak fix
    // free device buffers
    hipFree(currLevelNodes_cuda);
    hipFree(nodeNeighbors_cuda);
    hipFree(nodePtrs_cuda);
    hipFree(nodeVisited_cuda);
    hipFree(nodeInput_cuda);
    hipFree(nodeOutput_cuda);
    hipFree(nodeGate_cuda);
    return 0;
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z21global_queuing_kerneliiPiS_S_S_S_S_S_
.globl _Z21global_queuing_kerneliiPiS_S_S_S_S_S_
.p2align 8
.type _Z21global_queuing_kerneliiPiS_S_S_S_S_S_,@function
_Z21global_queuing_kerneliiPiS_S_S_S_S_S_:
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x0
s_load_b32 s4, s[0:1], 0x4c
s_mov_b32 s16, 0
s_waitcnt lgkmcnt(0)
s_ashr_i32 s5, s2, 31
s_ashr_i32 s8, s3, 31
s_add_i32 s2, s2, s5
s_add_i32 s9, s3, s8
s_xor_b32 s2, s2, s5
s_xor_b32 s9, s9, s8
v_cvt_f32_u32_e32 v1, s2
s_sub_i32 s7, 0, s2
s_xor_b32 s5, s8, s5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v1, v1
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v1, 0x4f7ffffe, v1
v_cvt_u32_f32_e32 v1, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_readfirstlane_b32 s6, v1
s_mul_i32 s7, s7, s6
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_hi_u32 s7, s6, s7
s_add_i32 s6, s6, s7
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_hi_u32 s6, s9, s6
s_mul_i32 s7, s6, s2
s_add_i32 s8, s6, 1
s_sub_i32 s7, s9, s7
s_delay_alu instid0(SALU_CYCLE_1)
s_sub_i32 s9, s7, s2
s_cmp_ge_u32 s7, s2
s_cselect_b32 s6, s8, s6
s_cselect_b32 s7, s9, s7
s_add_i32 s8, s6, 1
s_cmp_ge_u32 s7, s2
s_cselect_b32 s2, s8, s6
s_and_b32 s4, s4, 0xffff
s_xor_b32 s2, s2, s5
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
s_sub_i32 s2, s2, s5
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_mul_lo_u32 v0, v1, s2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v1, s2, v0
s_mov_b32 s2, exec_lo
v_min_i32_e32 v11, s3, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_lt_i32_e64 v0, v11
s_cbranch_execz .LBB0_34
s_clause 0x2
s_load_b256 s[4:11], s[0:1], 0x8
s_load_b128 s[12:15], s[0:1], 0x28
s_load_b64 s[2:3], s[0:1], 0x38
v_dual_mov_b32 v12, 1 :: v_dual_mov_b32 v13, 0
s_branch .LBB0_3
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s1
v_add_nc_u32_e32 v0, 1, v0
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
v_cmp_ge_i32_e32 vcc_lo, v0, v11
s_or_b32 s16, vcc_lo, s16
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s16
s_cbranch_execz .LBB0_34
.LBB0_3:
v_ashrrev_i32_e32 v1, 31, v0
s_mov_b32 s1, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s6, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v2, vcc_lo, s7, v2, vcc_lo
global_load_b32 v1, v[1:2], off
s_waitcnt vmcnt(0)
v_ashrrev_i32_e32 v2, 31, v1
v_lshlrev_b64 v[5:6], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v1, vcc_lo, s4, v5
v_add_co_ci_u32_e32 v2, vcc_lo, s5, v6, vcc_lo
global_load_b64 v[3:4], v[1:2], off
s_waitcnt vmcnt(0)
v_cmpx_lt_i32_e64 v3, v4
s_cbranch_execz .LBB0_2
v_ashrrev_i32_e32 v4, 31, v3
s_mov_b32 s17, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_lshlrev_b64 v[7:8], 2, v[3:4]
v_add_co_u32 v4, vcc_lo, s2, v5
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v6, vcc_lo
v_add_co_u32 v6, vcc_lo, s8, v7
s_delay_alu instid0(VALU_DEP_4)
v_add_co_ci_u32_e32 v7, vcc_lo, s9, v8, vcc_lo
s_branch .LBB0_7
.LBB0_5:
s_or_b32 exec_lo, exec_lo, s19
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s0, v9
s_getpc_b64 s[20:21]
s_add_u32 s20, s20, globalQueue@rel32@lo+4
s_addc_u32 s21, s21, globalQueue@rel32@hi+12
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v9, s0, v14
v_ashrrev_i32_e32 v10, 31, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[9:10], 2, v[9:10]
v_add_co_u32 v9, vcc_lo, v9, s20
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v10, vcc_lo, s21, v10, vcc_lo
global_store_b32 v[9:10], v8, off
.LBB0_6:
s_or_b32 exec_lo, exec_lo, s18
global_load_b32 v8, v[1:2], off offset:4
v_add_nc_u32_e32 v3, 1, v3
v_add_co_u32 v6, s0, v6, 4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_co_ci_u32_e64 v7, s0, 0, v7, s0
s_waitcnt vmcnt(0)
v_cmp_ge_i32_e32 vcc_lo, v3, v8
s_or_b32 s17, vcc_lo, s17
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s17
s_cbranch_execz .LBB0_2
.LBB0_7:
global_load_b32 v8, v[6:7], off
s_mov_b32 s18, exec_lo
s_waitcnt vmcnt(0)
v_ashrrev_i32_e32 v9, 31, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[9:10], 2, v[8:9]
v_add_co_u32 v14, vcc_lo, s10, v9
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v15, vcc_lo, s11, v10, vcc_lo
global_atomic_swap_b32 v14, v[14:15], v12, off glc
s_waitcnt vmcnt(0)
v_cmpx_eq_u32_e32 0, v14
s_cbranch_execz .LBB0_6
v_add_co_u32 v14, vcc_lo, s12, v9
v_add_co_ci_u32_e32 v15, vcc_lo, s13, v10, vcc_lo
v_add_co_u32 v17, vcc_lo, s14, v9
v_add_co_ci_u32_e32 v18, vcc_lo, s15, v10, vcc_lo
s_mov_b32 s0, exec_lo
global_load_b32 v16, v[14:15], off
global_load_b32 v14, v[17:18], off
global_load_b32 v15, v[4:5], off
s_waitcnt vmcnt(2)
v_cmpx_lt_i32_e32 2, v16
s_xor_b32 s0, exec_lo, s0
s_cbranch_execz .LBB0_20
s_mov_b32 s19, exec_lo
v_cmpx_lt_i32_e32 3, v16
s_xor_b32 s19, exec_lo, s19
s_cbranch_execz .LBB0_17
s_mov_b32 s21, exec_lo
v_cmpx_lt_i32_e32 4, v16
s_xor_b32 s21, exec_lo, s21
s_cbranch_execz .LBB0_14
s_mov_b32 s20, 0
s_mov_b32 s22, exec_lo
v_cmpx_eq_u32_e32 5, v16
s_cbranch_execz .LBB0_13
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, v14, v15
s_and_b32 s20, vcc_lo, exec_lo
.LBB0_13:
s_or_b32 exec_lo, exec_lo, s22
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 s20, s20, exec_lo
.LBB0_14:
s_and_not1_saveexec_b32 s21, s21
s_cbranch_execz .LBB0_16
s_waitcnt vmcnt(0)
v_cmp_ne_u32_e32 vcc_lo, v14, v15
s_and_not1_b32 s20, s20, exec_lo
s_and_b32 s22, vcc_lo, exec_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 s20, s20, s22
.LBB0_16:
s_or_b32 exec_lo, exec_lo, s21
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 s20, s20, exec_lo
.LBB0_17:
s_and_not1_saveexec_b32 s19, s19
s_cbranch_execz .LBB0_19
s_waitcnt vmcnt(0)
v_or_b32_e32 v14, v14, v15
s_and_not1_b32 s20, s20, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_eq_u32_e32 vcc_lo, 0, v14
s_and_b32 s21, vcc_lo, exec_lo
s_or_b32 s20, s20, s21
.LBB0_19:
s_or_b32 exec_lo, exec_lo, s19
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 s19, s20, exec_lo
.LBB0_20:
s_and_not1_saveexec_b32 s20, s0
s_cbranch_execz .LBB0_32
s_mov_b32 s0, exec_lo
v_cmpx_lt_i32_e32 0, v16
s_xor_b32 s21, exec_lo, s0
s_cbranch_execz .LBB0_27
s_mov_b32 s22, exec_lo
v_cmpx_lt_i32_e32 1, v16
s_xor_b32 s22, exec_lo, s22
s_cbranch_execz .LBB0_24
s_waitcnt vmcnt(1)
v_cmp_ne_u32_e32 vcc_lo, 1, v14
s_waitcnt vmcnt(0)
v_cmp_ne_u32_e64 s0, 1, v15
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_or_b32 s0, vcc_lo, s0
s_and_b32 s0, s0, exec_lo
.LBB0_24:
s_and_not1_saveexec_b32 s22, s22
s_cbranch_execz .LBB0_26
s_waitcnt vmcnt(0)
v_or_b32_e32 v14, v14, v15
s_and_not1_b32 s0, s0, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_ne_u32_e32 vcc_lo, 0, v14
s_and_b32 s23, vcc_lo, exec_lo
s_or_b32 s0, s0, s23
.LBB0_26:
s_or_b32 exec_lo, exec_lo, s22
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 s22, s0, exec_lo
.LBB0_27:
s_and_not1_saveexec_b32 s21, s21
s_cbranch_execz .LBB0_31
s_mov_b32 s0, 0
s_mov_b32 s23, exec_lo
v_cmpx_eq_u32_e32 0, v16
s_cbranch_execz .LBB0_30
s_waitcnt vmcnt(1)
v_cmp_eq_u32_e32 vcc_lo, 1, v14
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e64 s0, 1, v15
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s0, vcc_lo, s0
s_and_b32 s0, s0, exec_lo
.LBB0_30:
s_or_b32 exec_lo, exec_lo, s23
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
s_and_not1_b32 s22, s22, exec_lo
s_and_b32 s0, s0, exec_lo
s_or_b32 s22, s22, s0
.LBB0_31:
s_or_b32 exec_lo, exec_lo, s21
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
s_and_not1_b32 s0, s19, exec_lo
s_and_b32 s19, s22, exec_lo
s_or_b32 s19, s0, s19
.LBB0_32:
s_or_b32 exec_lo, exec_lo, s20
s_delay_alu instid0(SALU_CYCLE_1)
s_mov_b32 s0, exec_lo
v_add_co_u32 v9, vcc_lo, s2, v9
s_waitcnt vmcnt(0)
v_cndmask_b32_e64 v15, 0, 1, s19
v_mbcnt_lo_u32_b32 v14, s0, 0
v_add_co_ci_u32_e32 v10, vcc_lo, s3, v10, vcc_lo
s_mov_b32 s19, exec_lo
global_store_b32 v[9:10], v15, off
v_cmpx_eq_u32_e32 0, v14
s_cbranch_execz .LBB0_5
s_bcnt1_i32_b32 s0, s0
s_getpc_b64 s[20:21]
s_add_u32 s20, s20, numNextLevelNodes@rel32@lo+4
s_addc_u32 s21, s21, numNextLevelNodes@rel32@hi+12
v_mov_b32_e32 v9, s0
global_atomic_add_u32 v9, v13, v9, s[20:21] glc
s_branch .LBB0_5
.LBB0_34:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z21global_queuing_kerneliiPiS_S_S_S_S_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 320
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 19
.amdhsa_next_free_sgpr 24
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z21global_queuing_kerneliiPiS_S_S_S_S_S_, .Lfunc_end0-_Z21global_queuing_kerneliiPiS_S_S_S_S_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.protected globalQueue
.type globalQueue,@object
.section .bss,"aw",@nobits
.globl globalQueue
.p2align 4, 0x0
globalQueue:
.zero 28000000
.size globalQueue, 28000000
.protected numNextLevelNodes
.type numNextLevelNodes,@object
.globl numNextLevelNodes
.p2align 2, 0x0
numNextLevelNodes:
.long 0
.size numNextLevelNodes, 4
.type __hip_cuid_,@object
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym globalQueue
.addrsig_sym numNextLevelNodes
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 48
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 56
.size: 8
.value_kind: global_buffer
- .offset: 64
.size: 4
.value_kind: hidden_block_count_x
- .offset: 68
.size: 4
.value_kind: hidden_block_count_y
- .offset: 72
.size: 4
.value_kind: hidden_block_count_z
- .offset: 76
.size: 2
.value_kind: hidden_group_size_x
- .offset: 78
.size: 2
.value_kind: hidden_group_size_y
- .offset: 80
.size: 2
.value_kind: hidden_group_size_z
- .offset: 82
.size: 2
.value_kind: hidden_remainder_x
- .offset: 84
.size: 2
.value_kind: hidden_remainder_y
- .offset: 86
.size: 2
.value_kind: hidden_remainder_z
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 112
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 120
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 128
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 320
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z21global_queuing_kerneliiPiS_S_S_S_S_S_
.private_segment_fixed_size: 0
.sgpr_count: 26
.sgpr_spill_count: 0
.symbol: _Z21global_queuing_kerneliiPiS_S_S_S_S_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 19
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
// method to read the first, second and fourth input files
int read_input_one_two_four(int** input, char* filepath) {
FILE* fp = fopen(filepath, "r");
if (!fp) return fprintf(stderr, "Couldn't open file for reading\n");
int file_length;
fscanf(fp, "%d", &file_length);
*input = (int*) malloc(file_length * sizeof(int));
int next_int;
for (int i = 0; i < file_length; i++) {
fscanf(fp, "%d", &next_int);
(*input)[i] = next_int;
}
fclose(fp);
return file_length;
}
// method to read the third input file
// Reads the four-column node-description file.  The first line holds the row
// count; each following line is "visited, gate, input, output".  One array is
// allocated per column into *input1..*input4 (caller frees all four).
// Returns the row count, or fprintf's result if the file cannot be opened.
int read_input_three(int** input1, int** input2, int** input3, int** input4, char* filepath) {
    FILE* fp = fopen(filepath, "r");
    if (!fp) return fprintf(stderr, "Couldn't open file for reading\n");
    int file_length;
    fscanf(fp, "%d", &file_length);
    // Treat the four output parameters uniformly via a pointer table.
    int** columns[4] = { input1, input2, input3, input4 };
    for (int col = 0; col < 4; col++) {
        *columns[col] = (int*) malloc(file_length * sizeof(int));
    }
    int row_vals[4];
    for (int row = 0; row < file_length; row++) {
        fscanf(fp, "%d, %d, %d, %d", &row_vals[0], &row_vals[1], &row_vals[2], &row_vals[3]);
        for (int col = 0; col < 4; col++) {
            (*columns[col])[row] = row_vals[col];
        }
    }
    fclose(fp);
    return file_length;
}
// Device-global worklist holding the next BFS level (fixed 7,000,000-entry
// capacity; no overflow check — the kernel assumes the graph fits).
__device__ int globalQueue[7000000];
// Fill counter for globalQueue, advanced with atomicAdd by the kernel and
// read back by the host via hipMemcpyFromSymbol.
__device__ int numNextLevelNodes = 0;
// One level of BFS-style expansion over a gate graph, using a single global
// queue.  For each node in currLevelNodes, every unvisited neighbor is
// claimed atomically, its output is computed from nodeGate/nodeInput and the
// parent's nodeOutput, and it is appended to globalQueue.
//
// Launch: 1-D grid, any size (grid-stride loop).  No shared memory.
// totalThreads is retained for interface compatibility; the stride is now
// derived from the actual launch configuration.
__global__ void global_queuing_kernel(int totalThreads, int numNodes, int* nodePtrs, int* currLevelNodes, int* nodeNeighbors, int* nodeVisited, int* nodeGate, int* nodeInput, int* nodeOutput) {
    (void)totalThreads;
    // Grid-stride loop: covers every current-level node exactly once,
    // including the numNodes % totalThreads tail nodes that the original
    // contiguous partition (numNodes / totalThreads per thread) silently
    // dropped when the division truncated.
    int stride = gridDim.x * blockDim.x;
    for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < numNodes; index += stride) {
        int nodeIndex = currLevelNodes[index];
        // Loop over all neighbors of the node.
        for (int secondIndex = nodePtrs[nodeIndex]; secondIndex < nodePtrs[nodeIndex + 1]; secondIndex++) {
            int neighborIndex = nodeNeighbors[secondIndex];
            // atomicExch claims the neighbor: exactly one thread observes 0.
            const int alreadyVisited = atomicExch(&(nodeVisited[neighborIndex]), 1);
            if (!alreadyVisited) {
                int nInputV = nodeInput[neighborIndex];
                int nOutputV = nodeOutput[nodeIndex];
                int nGateV = nodeGate[neighborIndex];
                int result = 0;
                // Two-input gate evaluation on (nInputV, nOutputV).  The
                // case order looks like AND/OR/NAND/NOR/XOR/XNOR — TODO
                // confirm against the input-file format; unknown gate codes
                // yield 0, matching the original switch's fall-through.
                switch (nGateV) {
                    case 0: result = (nInputV == 1 && nOutputV == 1) ? 1 : 0; break;
                    case 1: result = (nInputV == 0 && nOutputV == 0) ? 0 : 1; break;
                    case 2: result = (nInputV == 1 && nOutputV == 1) ? 0 : 1; break;
                    case 3: result = (nInputV == 0 && nOutputV == 0) ? 1 : 0; break;
                    case 4: result = (nInputV == nOutputV) ? 0 : 1; break;
                    case 5: result = (nInputV == nOutputV) ? 1 : 0; break;
                }
                // Publish the neighbor's output and reserve one queue slot.
                nodeOutput[neighborIndex] = result;
                int globalQueueIndex = atomicAdd(&numNextLevelNodes, 1);
                globalQueue[globalQueueIndex] = neighborIndex;
            }
        }
        // NOTE: the original placed __syncthreads() here, inside a loop with
        // per-thread-divergent trip counts — undefined behavior.  No shared
        // memory is used, so no barrier is required at all.
    }
}
// Logs a human-readable message on stderr when a HIP call fails, then passes
// the status through unchanged so call sites can still inspect it.
inline hipError_t checkCudaErr(hipError_t err, const char* msg) {
    if (err == hipSuccess) {
        return err;
    }
    fprintf(stderr, "Error at runtime %s: %s\n", msg, hipGetErrorString(err));
    return err;
}
// Entry point: loads the graph from four input files, runs one level of
// global-queue expansion on the GPU, and writes the per-node outputs plus the
// next-level queue to the two output files.
//
// argv: [1] node pointers   [2] neighbor lists
//       [3] visited/gate/input/output columns
//       [4] current-level nodes
//       [5] node-output file [6] next-level-nodes file
int main(int argc, char *argv[]){
    if (argc < 7) {
        return fprintf(stderr, "Missing input argument(s)!\n");
    }
    // Host-side graph arrays (allocated by the readers, freed below).
    int *nodePtrs_h, *nodeNeighbors_h, *nodeVisited_h, *currLevelNodes_h;
    int *nodeGate_h, *nodeInput_h, *nodeOutput_h;
    int numNodePtrs = read_input_one_two_four(&nodePtrs_h, argv[1]);
    int numTotalNeighbors_h = read_input_one_two_four(&nodeNeighbors_h, argv[2]);
    int numNodes = read_input_three(&nodeVisited_h, &nodeGate_h, &nodeInput_h, &nodeOutput_h, argv[3]);
    int numCurrLevelNodes = read_input_one_two_four(&currLevelNodes_h, argv[4]);
    char* nodeOutput_fileName = argv[5];
    char* nextLevelNodes_fileName = argv[6];
    size_t numNodesSize = (size_t)numNodes * sizeof(int);
    // calloc, not malloc: this buffer is copied to the device symbol below,
    // and the original copied uninitialized bytes (undefined behavior).
    int *nextLevelNodes_h = (int *)calloc(numNodes, sizeof(int));
    checkCudaErr(hipMemcpyToSymbol(HIP_SYMBOL(globalQueue), nextLevelNodes_h, numNodesSize), "Copying");
    int numBlocks = 35;
    int blockSize = 128;
    // Device buffers.  (The original leaked a host malloc before every
    // hipMalloc by overwriting the pointer; only the device allocations are
    // actually needed.)
    int *nodePtrs_cuda, *currLevelNodes_cuda, *nodeNeighbors_cuda;
    int *nodeVisited_cuda, *nodeGate_cuda, *nodeInput_cuda, *nodeOutput_cuda;
    hipMalloc(&nodePtrs_cuda, numNodePtrs * sizeof(int));
    hipMemcpy(nodePtrs_cuda, nodePtrs_h, numNodePtrs * sizeof(int), hipMemcpyHostToDevice);
    hipMalloc(&currLevelNodes_cuda, numCurrLevelNodes * sizeof(int));
    hipMemcpy(currLevelNodes_cuda, currLevelNodes_h, numCurrLevelNodes * sizeof(int), hipMemcpyHostToDevice);
    hipMalloc(&nodeNeighbors_cuda, numTotalNeighbors_h * sizeof(int));
    hipMemcpy(nodeNeighbors_cuda, nodeNeighbors_h, numTotalNeighbors_h * sizeof(int), hipMemcpyHostToDevice);
    hipMalloc(&nodeVisited_cuda, numNodesSize);
    hipMemcpy(nodeVisited_cuda, nodeVisited_h, numNodesSize, hipMemcpyHostToDevice);
    hipMalloc(&nodeGate_cuda, numNodesSize);
    hipMemcpy(nodeGate_cuda, nodeGate_h, numNodesSize, hipMemcpyHostToDevice);
    hipMalloc(&nodeInput_cuda, numNodesSize);
    hipMemcpy(nodeInput_cuda, nodeInput_h, numNodesSize, hipMemcpyHostToDevice);
    hipMalloc(&nodeOutput_cuda, numNodesSize);
    hipMemcpy(nodeOutput_cuda, nodeOutput_h, numNodesSize, hipMemcpyHostToDevice);
    clock_t start = clock();
    global_queuing_kernel <<< numBlocks, blockSize >>> (blockSize * numBlocks, numNodes, nodePtrs_cuda, currLevelNodes_cuda, nodeNeighbors_cuda, nodeVisited_cuda, nodeGate_cuda, nodeInput_cuda, nodeOutput_cuda);
    checkCudaErr(hipGetLastError(), "GPU");
    checkCudaErr(hipDeviceSynchronize(), "Synchronization");
    // Stop the clock only after the device has finished: the kernel launch is
    // asynchronous, so the original measured launch overhead, not execution.
    clock_t end = clock();
    int numNextLevelNodes_h = 0;
    hipMemcpyFromSymbol(&numNextLevelNodes_h, HIP_SYMBOL(numNextLevelNodes), sizeof(int), 0, hipMemcpyDeviceToHost);
    checkCudaErr(hipMemcpyFromSymbol(nextLevelNodes_h, HIP_SYMBOL(globalQueue), numNodesSize), "Copying");
    int *outputBuffer = (int*)malloc(numNodesSize);
    checkCudaErr(hipMemcpy(outputBuffer, nodeOutput_cuda, numNodesSize, hipMemcpyDeviceToHost), "Copying");
    // Write per-node outputs.
    FILE *nodeOutputFile = fopen(nodeOutput_fileName, "w");
    if (!nodeOutputFile) return fprintf(stderr, "Couldn't open file for reading\n");
    fprintf(nodeOutputFile, "%d\n", numNodes);
    for (int counter = 0; counter < numNodes; counter++) {
        fprintf(nodeOutputFile, "%d\n", outputBuffer[counter]);
    }
    fclose(nodeOutputFile);
    // Write the next-level queue (count first, then the node indices).
    FILE *nextLevelOutputFile = fopen(nextLevelNodes_fileName, "w");
    if (!nextLevelOutputFile) return fprintf(stderr, "Couldn't open file for reading\n");
    fprintf(nextLevelOutputFile, "%d\n", numNextLevelNodes_h);
    for (int counter = 0; counter < numNextLevelNodes_h; counter++) {
        fprintf(nextLevelOutputFile, "%d\n", nextLevelNodes_h[counter]);
    }
    fclose(nextLevelOutputFile);
    double runtime = (double)(end - start) / CLOCKS_PER_SEC * 1000;
    printf("Execution time: %f ms\n", runtime);
    // Release host memory (the original leaked outputBuffer and
    // nextLevelNodes_h).
    free(nodePtrs_h);
    free(nodeNeighbors_h);
    free(nodeVisited_h);
    free(currLevelNodes_h);
    free(nodeGate_h);
    free(nodeInput_h);
    free(nodeOutput_h);
    free(nextLevelNodes_h);
    free(outputBuffer);
    // Release device memory.
    hipFree(currLevelNodes_cuda);
    hipFree(nodeNeighbors_cuda);
    hipFree(nodePtrs_cuda);
    hipFree(nodeVisited_cuda);
    hipFree(nodeInput_cuda);
    hipFree(nodeOutput_cuda);
    hipFree(nodeGate_cuda);
    return 0;
}
.file "global_queueing.hip"
.globl _Z23read_input_one_two_fourPPiPc # -- Begin function _Z23read_input_one_two_fourPPiPc
.p2align 4, 0x90
.type _Z23read_input_one_two_fourPPiPc,@function
_Z23read_input_one_two_fourPPiPc: # @_Z23read_input_one_two_fourPPiPc
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rsi, %rax
movq %rdi, %rbx
movl $.L.str, %esi
movq %rax, %rdi
callq fopen
testq %rax, %rax
je .LBB0_5
# %bb.1:
movq %rax, %r14
movq %rsp, %rdx
movl $.L.str.2, %esi
movq %rax, %rdi
xorl %eax, %eax
callq __isoc23_fscanf
movslq (%rsp), %r15
leaq (,%r15,4), %rdi
callq malloc
movq %rax, (%rbx)
testq %r15, %r15
jle .LBB0_4
# %bb.2: # %.lr.ph.preheader
leaq 4(%rsp), %r15
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB0_3: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl $.L.str.2, %esi
movq %r14, %rdi
movq %r15, %rdx
xorl %eax, %eax
callq __isoc23_fscanf
movl 4(%rsp), %eax
movq (%rbx), %rcx
movl %eax, (%rcx,%r12,4)
incq %r12
movslq (%rsp), %rax
cmpq %rax, %r12
jl .LBB0_3
.LBB0_4: # %._crit_edge
movq %r14, %rdi
callq fclose
movl (%rsp), %eax
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB0_5:
.cfi_def_cfa_offset 48
movq stderr(%rip), %rdi
movl $.L.str.1, %esi
xorl %eax, %eax
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
jmp fprintf # TAILCALL
.Lfunc_end0:
.size _Z23read_input_one_two_fourPPiPc, .Lfunc_end0-_Z23read_input_one_two_fourPPiPc
.cfi_endproc
# -- End function
.globl _Z16read_input_threePPiS0_S0_S0_Pc # -- Begin function _Z16read_input_threePPiS0_S0_S0_Pc
.p2align 4, 0x90
.type _Z16read_input_threePPiS0_S0_S0_Pc,@function
_Z16read_input_threePPiS0_S0_S0_Pc: # @_Z16read_input_threePPiS0_S0_S0_Pc
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $24, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rcx, %rbx
movq %rdx, %r14
movq %rsi, %r15
movq %rdi, %r12
movl $.L.str, %esi
movq %r8, %rdi
callq fopen
testq %rax, %rax
je .LBB1_5
# %bb.1:
movq %rax, %r13
leaq 4(%rsp), %rdx
movl $.L.str.2, %esi
movq %rax, %rdi
xorl %eax, %eax
callq __isoc23_fscanf
movslq 4(%rsp), %rbp
shlq $2, %rbp
movq %rbp, %rdi
callq malloc
movq %rax, (%r12)
movq %rbp, %rdi
callq malloc
movq %rax, (%r15)
movq %rbp, %rdi
callq malloc
movq %rax, (%r14)
movq %rbp, %rdi
callq malloc
movq %rax, (%rbx)
cmpl $0, 4(%rsp)
jle .LBB1_4
# %bb.2: # %.lr.ph.preheader
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB1_3: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl $.L.str.3, %esi
movq %r13, %rdi
leaq 20(%rsp), %rdx
leaq 16(%rsp), %rcx
leaq 12(%rsp), %r8
leaq 8(%rsp), %r9
xorl %eax, %eax
callq __isoc23_fscanf
movl 20(%rsp), %eax
movq (%r12), %rcx
movl %eax, (%rcx,%rbp,4)
movl 16(%rsp), %eax
movq (%r15), %rcx
movl %eax, (%rcx,%rbp,4)
movl 12(%rsp), %eax
movq (%r14), %rcx
movl %eax, (%rcx,%rbp,4)
movl 8(%rsp), %eax
movq (%rbx), %rcx
movl %eax, (%rcx,%rbp,4)
incq %rbp
movslq 4(%rsp), %rax
cmpq %rax, %rbp
jl .LBB1_3
.LBB1_4: # %._crit_edge
movq %r13, %rdi
callq fclose
movl 4(%rsp), %eax
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_5:
.cfi_def_cfa_offset 80
movq stderr(%rip), %rdi
movl $.L.str.1, %esi
xorl %eax, %eax
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
jmp fprintf # TAILCALL
.Lfunc_end1:
.size _Z16read_input_threePPiS0_S0_S0_Pc, .Lfunc_end1-_Z16read_input_threePPiS0_S0_S0_Pc
.cfi_endproc
# -- End function
.globl _Z36__device_stub__global_queuing_kerneliiPiS_S_S_S_S_S_ # -- Begin function _Z36__device_stub__global_queuing_kerneliiPiS_S_S_S_S_S_
.p2align 4, 0x90
.type _Z36__device_stub__global_queuing_kerneliiPiS_S_S_S_S_S_,@function
_Z36__device_stub__global_queuing_kerneliiPiS_S_S_S_S_S_: # @_Z36__device_stub__global_queuing_kerneliiPiS_S_S_S_S_S_
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movq %rdx, 88(%rsp)
movq %rcx, 80(%rsp)
movq %r8, 72(%rsp)
movq %r9, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 88(%rsp), %rax
movq %rax, 112(%rsp)
leaq 80(%rsp), %rax
movq %rax, 120(%rsp)
leaq 72(%rsp), %rax
movq %rax, 128(%rsp)
leaq 64(%rsp), %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
leaq 184(%rsp), %rax
movq %rax, 152(%rsp)
leaq 192(%rsp), %rax
movq %rax, 160(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z21global_queuing_kerneliiPiS_S_S_S_S_S_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end2:
.size _Z36__device_stub__global_queuing_kerneliiPiS_S_S_S_S_S_, .Lfunc_end2-_Z36__device_stub__global_queuing_kerneliiPiS_S_S_S_S_S_
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI3_0:
.quad 0x412e848000000000 # double 1.0E+6
.LCPI3_1:
.quad 0x408f400000000000 # double 1000
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
cmpl $6, %edi
jle .LBB3_20
# %bb.1:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $328, %rsp # imm = 0x148
.cfi_def_cfa_offset 384
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
movq 8(%rsi), %rsi
leaq 120(%rsp), %rdi
callq _Z23read_input_one_two_fourPPiPc
movl %eax, %r12d
movq 16(%rbx), %rsi
leaq 112(%rsp), %rdi
callq _Z23read_input_one_two_fourPPiPc
movl %eax, %r15d
movq 24(%rbx), %r8
leaq 104(%rsp), %rdi
leaq 88(%rsp), %rsi
leaq 80(%rsp), %rdx
leaq 72(%rsp), %rcx
callq _Z16read_input_threePPiS0_S0_S0_Pc
movl %eax, %ebp
movq 32(%rbx), %rsi
leaq 96(%rsp), %rdi
callq _Z23read_input_one_two_fourPPiPc
movl %eax, %r13d
movq 40(%rbx), %rax
movq %rax, 136(%rsp) # 8-byte Spill
movq 48(%rbx), %rax
movq %rax, 144(%rsp) # 8-byte Spill
movl %ebp, 60(%rsp) # 4-byte Spill
movslq %ebp, %r14
leaq (,%r14,4), %rbp
movq %rbp, %rdi
callq malloc
movq %rax, %rbx
movl $globalQueue, %edi
movq %rax, %rsi
movq %rbp, %rdx
xorl %ecx, %ecx
movl $1, %r8d
callq hipMemcpyToSymbol
testl %eax, %eax
jne .LBB3_2
.LBB3_3: # %_Z12checkCudaErr10hipError_tPKc.exit
movslq %r12d, %r12
shlq $2, %r12
movq %r12, %rdi
callq malloc
movq %rax, 48(%rsp)
leaq 48(%rsp), %rdi
movq %r12, %rsi
callq hipMalloc
movq 48(%rsp), %rdi
movq 120(%rsp), %rsi
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
movslq %r13d, %r12
shlq $2, %r12
movq %r12, %rdi
callq malloc
movq %rax, 40(%rsp)
leaq 40(%rsp), %rdi
movq %r12, %rsi
callq hipMalloc
movq 40(%rsp), %rdi
movq 96(%rsp), %rsi
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
movslq %r15d, %r15
shlq $2, %r15
movq %r15, %rdi
callq malloc
movq %rax, 32(%rsp)
leaq 32(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
movq 32(%rsp), %rdi
movq 112(%rsp), %rsi
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
movq %r14, %r15
shlq $34, %r15
sarq $32, %r15
movq %r15, %rdi
callq malloc
movq %rax, 24(%rsp)
leaq 24(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
movq 24(%rsp), %rdi
movq 104(%rsp), %rsi
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
movq %r15, %rdi
callq malloc
movq %rax, 16(%rsp)
leaq 16(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movq 88(%rsp), %rsi
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
movq %r15, %rdi
callq malloc
movq %rax, 8(%rsp)
leaq 8(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
movq 8(%rsp), %rdi
movq 80(%rsp), %rsi
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
movq %r15, %rdi
callq malloc
movq %rax, (%rsp)
movq %rsp, %rdi
movq %r15, %rsi
callq hipMalloc
movq (%rsp), %rdi
movq 72(%rsp), %rsi
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
callq clock
movq %rax, 128(%rsp) # 8-byte Spill
movabsq $4294967331, %rdi # imm = 0x100000023
leaq 93(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_5
# %bb.4:
movq 48(%rsp), %rax
movq 40(%rsp), %rcx
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
movq 8(%rsp), %r8
movq (%rsp), %r9
movl $4480, 68(%rsp) # imm = 0x1180
movl %r14d, 64(%rsp)
movq %rax, 248(%rsp)
movq %rcx, 240(%rsp)
movq %rdx, 232(%rsp)
movq %rsi, 224(%rsp)
movq %rdi, 216(%rsp)
movq %r8, 208(%rsp)
movq %r9, 200(%rsp)
leaq 68(%rsp), %rax
movq %rax, 256(%rsp)
leaq 64(%rsp), %rax
movq %rax, 264(%rsp)
leaq 248(%rsp), %rax
movq %rax, 272(%rsp)
leaq 240(%rsp), %rax
movq %rax, 280(%rsp)
leaq 232(%rsp), %rax
movq %rax, 288(%rsp)
leaq 224(%rsp), %rax
movq %rax, 296(%rsp)
leaq 216(%rsp), %rax
movq %rax, 304(%rsp)
leaq 208(%rsp), %rax
movq %rax, 312(%rsp)
leaq 200(%rsp), %rax
movq %rax, 320(%rsp)
leaq 184(%rsp), %rdi
leaq 168(%rsp), %rsi
leaq 160(%rsp), %rdx
leaq 152(%rsp), %rcx
callq __hipPopCallConfiguration
movq 184(%rsp), %rsi
movl 192(%rsp), %edx
movq 168(%rsp), %rcx
movl 176(%rsp), %r8d
leaq 256(%rsp), %r9
movl $_Z21global_queuing_kerneliiPiS_S_S_S_S_S_, %edi
pushq 152(%rsp)
.cfi_adjust_cfa_offset 8
pushq 168(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_5:
callq clock
movq %rax, %r12
callq hipDeviceSynchronize
testl %eax, %eax
jne .LBB3_6
.LBB3_7: # %_Z12checkCudaErr10hipError_tPKc.exit67
callq hipGetLastError
testl %eax, %eax
jne .LBB3_8
.LBB3_9: # %_Z12checkCudaErr10hipError_tPKc.exit69
leaq 256(%rsp), %rdi
movl $numNextLevelNodes, %esi
movl $4, %edx
xorl %ecx, %ecx
movl $2, %r8d
callq hipMemcpyFromSymbol
movl $globalQueue, %esi
movq %rbx, %rdi
movq %r15, %rdx
xorl %ecx, %ecx
movl $2, %r8d
callq hipMemcpyFromSymbol
testl %eax, %eax
jne .LBB3_10
.LBB3_11: # %_Z12checkCudaErr10hipError_tPKc.exit71
movq %r15, %rdi
callq malloc
movq %rax, %r13
movq (%rsp), %rsi
movq %rax, %rdi
movq %r15, %rdx
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB3_12
.LBB3_13: # %_Z12checkCudaErr10hipError_tPKc.exit73
movl $.L.str.8, %esi
movq 136(%rsp), %rdi # 8-byte Reload
callq fopen
movq %rax, %r15
movl $.L.str.9, %esi
movq %rax, %rdi
movl %r14d, %edx
xorl %eax, %eax
callq fprintf
movl 60(%rsp), %eax # 4-byte Reload
testl %eax, %eax
jle .LBB3_16
# %bb.14: # %.lr.ph.preheader
movl %eax, %r14d
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB3_15: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl (%r13,%rbp,4), %edx
movl $.L.str.9, %esi
movq %r15, %rdi
xorl %eax, %eax
callq fprintf
incq %rbp
cmpq %rbp, %r14
jne .LBB3_15
.LBB3_16: # %._crit_edge
movq %r15, %rdi
callq fclose
movl $.L.str.8, %esi
movq 144(%rsp), %rdi # 8-byte Reload
callq fopen
movq %rax, %r14
movl 256(%rsp), %edx
movl $.L.str.9, %esi
movq %rax, %rdi
xorl %eax, %eax
callq fprintf
cmpl $0, 256(%rsp)
jle .LBB3_19
# %bb.17: # %.lr.ph80.preheader
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_18: # %.lr.ph80
# =>This Inner Loop Header: Depth=1
movl (%rbx,%r15,4), %edx
movl $.L.str.9, %esi
movq %r14, %rdi
xorl %eax, %eax
callq fprintf
incq %r15
movslq 256(%rsp), %rax
cmpq %rax, %r15
jl .LBB3_18
.LBB3_19: # %._crit_edge81
movq %r14, %rdi
callq fclose
subq 128(%rsp), %r12 # 8-byte Folded Reload
cvtsi2sd %r12, %xmm0
divsd .LCPI3_0(%rip), %xmm0
mulsd .LCPI3_1(%rip), %xmm0
movl $.L.str.10, %edi
movb $1, %al
callq printf
movq 120(%rsp), %rdi
callq free
movq 112(%rsp), %rdi
callq free
movq 104(%rsp), %rdi
callq free
movq 96(%rsp), %rdi
callq free
movq 88(%rsp), %rdi
callq free
movq 80(%rsp), %rdi
callq free
movq 72(%rsp), %rdi
callq free
movq 40(%rsp), %rdi
callq hipFree
movq 32(%rsp), %rdi
callq hipFree
movq 48(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $328, %rsp # imm = 0x148
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB3_20:
.cfi_restore %rbx
.cfi_restore %rbp
.cfi_restore %r12
.cfi_restore %r13
.cfi_restore %r14
.cfi_restore %r15
movq stderr(%rip), %rdi
movl $.L.str.4, %esi
xorl %eax, %eax
jmp fprintf # TAILCALL
.LBB3_2:
.cfi_def_cfa_offset 384
.cfi_offset %rbx, -56
.cfi_offset %rbp, -16
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq stderr(%rip), %rbp
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.11, %esi
movl $.L.str.5, %edx
movq %rbp, %rdi
movq %rax, %rcx
xorl %eax, %eax
callq fprintf
jmp .LBB3_3
.LBB3_6:
movq stderr(%rip), %r13
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.11, %esi
movl $.L.str.6, %edx
movq %r13, %rdi
movq %rax, %rcx
xorl %eax, %eax
callq fprintf
jmp .LBB3_7
.LBB3_8:
movq stderr(%rip), %r13
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.11, %esi
movl $.L.str.7, %edx
movq %r13, %rdi
movq %rax, %rcx
xorl %eax, %eax
callq fprintf
jmp .LBB3_9
.LBB3_10:
movq stderr(%rip), %r13
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.11, %esi
movl $.L.str.5, %edx
movq %r13, %rdi
movq %rax, %rcx
xorl %eax, %eax
callq fprintf
jmp .LBB3_11
.LBB3_12:
movq stderr(%rip), %r15
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.11, %esi
movl $.L.str.5, %edx
movq %r15, %rdi
movq %rax, %rcx
xorl %eax, %eax
callq fprintf
jmp .LBB3_13
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
# Module constructor (invoked via .init_array before main): registers the
# embedded HIP fat binary exactly once, then registers the kernel stub and
# the two device-side globals with the HIP runtime, and finally schedules
# __hip_module_dtor to run at process exit via atexit.
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
# One-time guard: skip registration if __hip_gpubin_handle is already set.
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
# Reserve 32 zeroed stack bytes: the four trailing (unused) pointer
# arguments of __hipRegisterFunction are passed on the stack.
subq $32, %rsp
.cfi_adjust_cfa_offset 32
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
# Register the host stub for the device kernel under its mangled name.
movl $_Z21global_queuing_kerneliiPiS_S_S_S_S_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
addq $32, %rsp
.cfi_adjust_cfa_offset -32
# Register device global "globalQueue" (28,000,000 bytes — see .comm below).
movl $globalQueue, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movl $28000000, %r9d # imm = 0x1AB3F00
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
# Register device global "numNextLevelNodes" (a single 4-byte int).
movl $numNextLevelNodes, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movl $4, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
# Tail-call atexit(__hip_module_dtor) so the fat binary is unregistered at exit.
movl $__hip_module_dtor, %edi
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
# Module destructor (registered with atexit by __hip_module_ctor):
# unregisters the HIP fat binary if it was registered, and clears the
# cached handle so the teardown is idempotent.
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
# pushq %rax only realigns the stack to 16 bytes before the call.
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "r"
.size .L.str, 2
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Couldn't open file for reading\n"
.size .L.str.1, 32
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "%d"
.size .L.str.2, 3
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "%d, %d, %d, %d"
.size .L.str.3, 15
.type globalQueue,@object # @globalQueue
.local globalQueue
.comm globalQueue,28000000,16
.type numNextLevelNodes,@object # @numNextLevelNodes
.local numNextLevelNodes
.comm numNextLevelNodes,4,4
.type _Z21global_queuing_kerneliiPiS_S_S_S_S_S_,@object # @_Z21global_queuing_kerneliiPiS_S_S_S_S_S_
.section .rodata,"a",@progbits
.globl _Z21global_queuing_kerneliiPiS_S_S_S_S_S_
.p2align 3, 0x0
_Z21global_queuing_kerneliiPiS_S_S_S_S_S_:
.quad _Z36__device_stub__global_queuing_kerneliiPiS_S_S_S_S_S_
.size _Z21global_queuing_kerneliiPiS_S_S_S_S_S_, 8
.type .L.str.4,@object # @.str.4
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.4:
.asciz "Missing input argument(s)!\n"
.size .L.str.4, 28
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Copying"
.size .L.str.5, 8
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "Synchronization"
.size .L.str.6, 16
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "GPU"
.size .L.str.7, 4
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "w"
.size .L.str.8, 2
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "%d\n"
.size .L.str.9, 4
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "Execution time: %f ms\n"
.size .L.str.10, 23
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "Error at runtime %s: %s\n"
.size .L.str.11, 25
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z21global_queuing_kerneliiPiS_S_S_S_S_S_"
.size .L__unnamed_1, 42
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "globalQueue"
.size .L__unnamed_2, 12
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "numNextLevelNodes"
.size .L__unnamed_3, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z36__device_stub__global_queuing_kerneliiPiS_S_S_S_S_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym globalQueue
.addrsig_sym numNextLevelNodes
.addrsig_sym _Z21global_queuing_kerneliiPiS_S_S_S_S_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z21global_queuing_kerneliiPiS_S_S_S_S_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ IABS R5, c[0x0][0x160] ; /* 0x0000580000057a13 */
/* 0x000fe20000000000 */
/*0020*/ ULDC.64 UR4, c[0x0][0x160] ; /* 0x0000580000047ab9 */
/* 0x000fe40000000a00 */
/*0030*/ ULOP3.LUT UR4, UR5, UR4, URZ, 0x3c, !UPT ; /* 0x0000000405047292 */
/* 0x000fe2000f8e3c3f */
/*0040*/ I2F.RP R0, R5 ; /* 0x0000000500007306 */
/* 0x000e2a0000209400 */
/*0050*/ ISETP.LE.AND P1, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fc6000bf23270 */
/*0060*/ MUFU.RCP R0, R0 ; /* 0x0000000000007308 */
/* 0x001e240000001000 */
/*0070*/ IADD3 R2, R0, 0xffffffe, RZ ; /* 0x0ffffffe00027810 */
/* 0x001fcc0007ffe0ff */
/*0080*/ F2I.FTZ.U32.TRUNC.NTZ R3, R2 ; /* 0x0000000200037305 */
/* 0x000064000021f000 */
/*0090*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */
/* 0x001fe400078e00ff */
/*00a0*/ IMAD.MOV R4, RZ, RZ, -R3 ; /* 0x000000ffff047224 */
/* 0x002fc800078e0a03 */
/*00b0*/ IMAD R7, R4, R5, RZ ; /* 0x0000000504077224 */
/* 0x000fe200078e02ff */
/*00c0*/ IABS R4, c[0x0][0x164] ; /* 0x0000590000047a13 */
/* 0x000fc60000000000 */
/*00d0*/ IMAD.HI.U32 R3, R3, R7, R2 ; /* 0x0000000703037227 */
/* 0x000fe400078e0002 */
/*00e0*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e280000002500 */
/*00f0*/ IMAD.HI.U32 R3, R3, R4, RZ ; /* 0x0000000403037227 */
/* 0x000fc800078e00ff */
/*0100*/ IMAD.MOV R0, RZ, RZ, -R3 ; /* 0x000000ffff007224 */
/* 0x000fc800078e0a03 */
/*0110*/ IMAD R0, R5, R0, R4 ; /* 0x0000000005007224 */
/* 0x000fca00078e0204 */
/*0120*/ ISETP.GT.U32.AND P2, PT, R5, R0, PT ; /* 0x000000000500720c */
/* 0x000fda0003f44070 */
/*0130*/ @!P2 IMAD.IADD R0, R0, 0x1, -R5 ; /* 0x000000010000a824 */
/* 0x000fe200078e0a05 */
/*0140*/ @!P2 IADD3 R3, R3, 0x1, RZ ; /* 0x000000010303a810 */
/* 0x000fe40007ffe0ff */
/*0150*/ ISETP.NE.AND P2, PT, RZ, c[0x0][0x160], PT ; /* 0x00005800ff007a0c */
/* 0x000fe40003f45270 */
/*0160*/ ISETP.GE.U32.AND P0, PT, R0, R5, PT ; /* 0x000000050000720c */
/* 0x000fe40003f06070 */
/*0170*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e360000002100 */
/*0180*/ @P0 IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103030810 */
/* 0x000fca0007ffe0ff */
/*0190*/ @!P1 IMAD.MOV R3, RZ, RZ, -R3 ; /* 0x000000ffff039224 */
/* 0x000fe200078e0a03 */
/*01a0*/ @!P2 LOP3.LUT R3, RZ, c[0x0][0x160], RZ, 0x33, !PT ; /* 0x00005800ff03aa12 */
/* 0x000fc800078e33ff */
/*01b0*/ ISETP.GE.AND P0, PT, R3, 0x1, PT ; /* 0x000000010300780c */
/* 0x000fe20003f06270 */
/*01c0*/ IMAD R0, R2, c[0x0][0x0], R5 ; /* 0x0000000002007a24 */
/* 0x001fc800078e0205 */
/*01d0*/ IMAD R0, R0, R3, RZ ; /* 0x0000000300007224 */
/* 0x000fca00078e02ff */
/*01e0*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x164], !P0 ; /* 0x0000590000007a0c */
/* 0x000fda0004706670 */
/*01f0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0200*/ IMAD.IADD R6, R3, 0x1, R0 ; /* 0x0000000103067824 */
/* 0x000fe200078e0200 */
/*0210*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0220*/ IMAD.MOV.U32 R9, RZ, RZ, 0x4 ; /* 0x00000004ff097424 */
/* 0x000fc800078e00ff */
/*0230*/ IMAD.WIDE R4, R0, R9, c[0x0][0x170] ; /* 0x00005c0000047625 */
/* 0x000fca00078e0209 */
/*0240*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */
/* 0x000ea4000c1e1900 */
/*0250*/ IMAD.WIDE R2, R10, R9, c[0x0][0x168] ; /* 0x00005a000a027625 */
/* 0x004fca00078e0209 */
/*0260*/ LDG.E R7, [R2.64] ; /* 0x0000000402077981 */
/* 0x000ea8000c1e1900 */
/*0270*/ LDG.E R8, [R2.64+0x4] ; /* 0x0000040402087981 */
/* 0x000ea4000c1e1900 */
/*0280*/ ISETP.GE.AND P0, PT, R7, R8, PT ; /* 0x000000080700720c */
/* 0x004fda0003f06270 */
/*0290*/ @P0 BRA 0x7b0 ; /* 0x0000051000000947 */
/* 0x000fea0003800000 */
/*02a0*/ SHF.R.S32.HI R5, RZ, 0x1f, R10 ; /* 0x0000001fff057819 */
/* 0x000fe2000001140a */
/*02b0*/ IMAD.WIDE R8, R7, R9, c[0x0][0x178] ; /* 0x00005e0007087625 */
/* 0x000fe200078e0209 */
/*02c0*/ LEA R4, P0, R10, c[0x0][0x198], 0x2 ; /* 0x000066000a047a11 */
/* 0x000fc800078010ff */
/*02d0*/ LEA.HI.X R5, R10, c[0x0][0x19c], R5, 0x2, P0 ; /* 0x000067000a057a11 */
/* 0x000fe400000f1405 */
/*02e0*/ LDG.E R10, [R8.64] ; /* 0x00000004080a7981 */
/* 0x000ea2000c1e1900 */
/*02f0*/ IMAD.MOV.U32 R11, RZ, RZ, 0x4 ; /* 0x00000004ff0b7424 */
/* 0x000fe200078e00ff */
/*0300*/ YIELD ; /* 0x0000000000007946 */
/* 0x000fe20003800000 */
/*0310*/ IMAD.MOV.U32 R15, RZ, RZ, 0x1 ; /* 0x00000001ff0f7424 */
/* 0x000fe400078e00ff */
/*0320*/ IMAD.WIDE R12, R10, R11, c[0x0][0x180] ; /* 0x000060000a0c7625 */
/* 0x004fcc00078e020b */
/*0330*/ ATOMG.E.EXCH.STRONG.GPU PT, R12, [R12.64], R15 ; /* 0x0000000f0c0c79a8 */
/* 0x000ea2000c1ee1c4 */
/*0340*/ BSSY B0, 0x750 ; /* 0x0000040000007945 */
/* 0x000fe20003800000 */
/*0350*/ ISETP.NE.AND P0, PT, R12, RZ, PT ; /* 0x000000ff0c00720c */
/* 0x004fda0003f05270 */
/*0360*/ @P0 BRA 0x740 ; /* 0x000003d000000947 */
/* 0x000fea0003800000 */
/*0370*/ IMAD.SHL.U32 R12, R10.reuse, 0x4, RZ ; /* 0x000000040a0c7824 */
/* 0x040fe200078e00ff */
/*0380*/ SHF.R.S32.HI R13, RZ, 0x1f, R10 ; /* 0x0000001fff0d7819 */
/* 0x000fe2000001140a */
/*0390*/ LDG.E R19, [R4.64] ; /* 0x0000000404137981 */
/* 0x000166000c1e1900 */
/*03a0*/ SHF.L.U64.HI R13, R10, 0x2, R13 ; /* 0x000000020a0d7819 */
/* 0x000fe4000001020d */
/*03b0*/ IADD3 R16, P0, R12, c[0x0][0x188], RZ ; /* 0x000062000c107a10 */
/* 0x000fc80007f1e0ff */
/*03c0*/ IADD3.X R17, R13, c[0x0][0x18c], RZ, P0, !PT ; /* 0x000063000d117a10 */
/* 0x000fca00007fe4ff */
/*03d0*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000ea2000c1e1900 */
/*03e0*/ IADD3 R14, P0, R12, c[0x0][0x190], RZ ; /* 0x000064000c0e7a10 */
/* 0x000fc80007f1e0ff */
/*03f0*/ IADD3.X R15, R13, c[0x0][0x194], RZ, P0, !PT ; /* 0x000065000d0f7a10 */
/* 0x000fca00007fe4ff */
/*0400*/ LDG.E R18, [R14.64] ; /* 0x000000040e127981 */
/* 0x000162000c1e1900 */
/*0410*/ BSSY B1, 0x610 ; /* 0x000001f000017945 */
/* 0x000fe20003800000 */
/*0420*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0430*/ ISETP.GT.AND P1, PT, R16, 0x2, PT ; /* 0x000000021000780c */
/* 0x004fda0003f24270 */
/*0440*/ @P1 BRA 0x520 ; /* 0x000000d000001947 */
/* 0x000fea0003800000 */
/*0450*/ IMNMX.U32 R14, R16, 0x3, PT ; /* 0x00000003100e7817 */
/* 0x001fca0003800000 */
/*0460*/ IMAD.SHL.U32 R16, R14, 0x4, RZ ; /* 0x000000040e107824 */
/* 0x000fc800078e00ff */
/*0470*/ LDC R14, c[0x2][R16] ; /* 0x00800000100e7b82 */
/* 0x000e240000000800 */
/*0480*/ SHF.R.S32.HI R15, RZ, 0x1f, R14 ; /* 0x0000001fff0f7819 */
/* 0x001fc8000001140e */
/*0490*/ BRX R14 -0x4a0 ; /* 0xfffffb600e007949 */
/* 0x000fea000383ffff */
/*04a0*/ ISETP.NE.AND P0, PT, R19, 0x1, PT ; /* 0x000000011300780c */
/* 0x020fc80003f05270 */
/*04b0*/ ISETP.NE.OR P0, PT, R18, 0x1, P0 ; /* 0x000000011200780c */
/* 0x000fe20000705670 */
/*04c0*/ BRA 0x600 ; /* 0x0000013000007947 */
/* 0x000fee0003800000 */
/*04d0*/ ISETP.NE.AND P0, PT, R19, 0x1, PT ; /* 0x000000011300780c */
/* 0x020fc80003f05270 */
/*04e0*/ ISETP.EQ.AND P0, PT, R18, 0x1, !P0 ; /* 0x000000011200780c */
/* 0x000fe20004702270 */
/*04f0*/ BRA 0x600 ; /* 0x0000010000007947 */
/* 0x000fee0003800000 */
/*0500*/ LOP3.LUT P0, RZ, R19, R18, RZ, 0xfc, !PT ; /* 0x0000001213ff7212 */
/* 0x020fe2000780fcff */
/*0510*/ BRA 0x600 ; /* 0x000000e000007947 */
/* 0x000fee0003800000 */
/*0520*/ IADD3 R14, R16, -0x3, RZ ; /* 0xfffffffd100e7810 */
/* 0x001fc80007ffe0ff */
/*0530*/ IMNMX.U32 R14, R14, 0x2, PT ; /* 0x000000020e0e7817 */
/* 0x000fca0003800000 */
/*0540*/ IMAD.SHL.U32 R17, R14, 0x4, RZ ; /* 0x000000040e117824 */
/* 0x000fc800078e00ff */
/*0550*/ LDC R14, c[0x2][R17+0x10] ; /* 0x00800400110e7b82 */
/* 0x000e240000000800 */
/*0560*/ SHF.R.S32.HI R15, RZ, 0x1f, R14 ; /* 0x0000001fff0f7819 */
/* 0x001fc8000001140e */
/*0570*/ BRX R14 -0x580 ; /* 0xfffffa800e007949 */
/* 0x000fea000383ffff */
/*0580*/ ISETP.NE.AND P1, PT, R16, 0x5, PT ; /* 0x000000051000780c */
/* 0x000fda0003f25270 */
/*0590*/ @P1 BRA 0x600 ; /* 0x0000006000001947 */
/* 0x000fea0003800000 */
/*05a0*/ ISETP.EQ.AND P0, PT, R18, R19, PT ; /* 0x000000131200720c */
/* 0x020fe20003f02270 */
/*05b0*/ BRA 0x600 ; /* 0x0000004000007947 */
/* 0x000fee0003800000 */
/*05c0*/ LOP3.LUT P0, RZ, R19, R18, RZ, 0xfc, !PT ; /* 0x0000001213ff7212 */
/* 0x020fc8000780fcff */
/*05d0*/ PLOP3.LUT P0, PT, P0, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe2000070e170 */
/*05e0*/ BRA 0x600 ; /* 0x0000001000007947 */
/* 0x000fee0003800000 */
/*05f0*/ ISETP.NE.AND P0, PT, R18, R19, PT ; /* 0x000000131200720c */
/* 0x020fd00003f05270 */
/*0600*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0610*/ S2R R15, SR_LANEID ; /* 0x00000000000f7919 */
/* 0x000e220000000000 */
/*0620*/ VOTEU.ANY UR6, UPT, PT ; /* 0x0000000000067886 */
/* 0x000fe200038e0100 */
/*0630*/ IADD3 R12, P2, R12, c[0x0][0x198], RZ ; /* 0x000066000c0c7a10 */
/* 0x000fe20007f5e0ff */
/*0640*/ FLO.U32 R18, UR6 ; /* 0x0000000600127d00 */
/* 0x020e2200080e0000 */
/*0650*/ SEL R19, RZ, 0x1, !P0 ; /* 0x00000001ff137807 */
/* 0x000fe20004000000 */
/*0660*/ IMAD.MOV.U32 R14, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff0e7624 */
/* 0x000fe200078e00ff */
/*0670*/ IADD3.X R13, R13, c[0x0][0x19c], RZ, P2, !PT ; /* 0x000067000d0d7a10 */
/* 0x000fca00017fe4ff */
/*0680*/ POPC R21, UR6 ; /* 0x0000000600157d09 */
/* 0x000e620008000000 */
/*0690*/ STG.E [R12.64], R19 ; /* 0x000000130c007986 */
/* 0x000fe2000c101904 */
/*06a0*/ ISETP.EQ.U32.AND P1, PT, R18, R15, PT ; /* 0x0000000f1200720c */
/* 0x001fe20003f22070 */
/*06b0*/ IMAD.MOV.U32 R15, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff0f7624 */
/* 0x000fd800078e00ff */
/*06c0*/ @P1 ATOMG.E.ADD.STRONG.GPU PT, R15, [R14.64], R21 ; /* 0x000000150e0f19a8 */
/* 0x002ea800081ee1c4 */
/*06d0*/ S2R R17, SR_LTMASK ; /* 0x0000000000117919 */
/* 0x000e240000003900 */
/*06e0*/ LOP3.LUT R20, R17, UR6, RZ, 0xc0, !PT ; /* 0x0000000611147c12 */
/* 0x001fc8000f8ec0ff */
/*06f0*/ POPC R17, R20 ; /* 0x0000001400117309 */
/* 0x000e220000000000 */
/*0700*/ SHFL.IDX PT, R16, R15, R18, 0x1f ; /* 0x00001f120f107589 */
/* 0x004e2400000e0000 */
/*0710*/ IMAD.IADD R16, R16, 0x1, R17 ; /* 0x0000000110107824 */
/* 0x001fc800078e0211 */
/*0720*/ IMAD.WIDE R16, R16, R11, c[0x4][0x0] ; /* 0x0100000010107625 */
/* 0x000fca00078e020b */
/*0730*/ STG.E [R16.64], R10 ; /* 0x0000000a10007986 */
/* 0x0001e4000c101904 */
/*0740*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0750*/ LDG.E R10, [R2.64+0x4] ; /* 0x00000404020a7981 */
/* 0x001ea2000c1e1900 */
/*0760*/ IADD3 R7, R7, 0x1, RZ ; /* 0x0000000107077810 */
/* 0x000fe40007ffe0ff */
/*0770*/ IADD3 R8, P1, R8, 0x4, RZ ; /* 0x0000000408087810 */
/* 0x000fca0007f3e0ff */
/*0780*/ IMAD.X R9, RZ, RZ, R9, P1 ; /* 0x000000ffff097224 */
/* 0x000fe200008e0609 */
/*0790*/ ISETP.GE.AND P0, PT, R7, R10, PT ; /* 0x0000000a0700720c */
/* 0x004fda0003f06270 */
/*07a0*/ @!P0 BRA 0x2e0 ; /* 0xfffffb3000008947 */
/* 0x000fea000383ffff */
/*07b0*/ IADD3 R0, R0, 0x1, RZ ; /* 0x0000000100007810 */
/* 0x000fe20007ffe0ff */
/*07c0*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe20003800000 */
/*07d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe40000010000 */
/*07e0*/ ISETP.GE.AND P0, PT, R0.reuse, c[0x0][0x164], PT ; /* 0x0000590000007a0c */
/* 0x040fe40003f06270 */
/*07f0*/ ISETP.LT.AND P1, PT, R0, R6, PT ; /* 0x000000060000720c */
/* 0x000fda0003f21270 */
/*0800*/ @!P0 BRA P1, 0x220 ; /* 0xfffffa1000008947 */
/* 0x000fea000083ffff */
/*0810*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0820*/ BRA 0x820; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0830*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0840*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0850*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0860*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0870*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0880*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0890*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z21global_queuing_kerneliiPiS_S_S_S_S_S_
.globl _Z21global_queuing_kerneliiPiS_S_S_S_S_S_
.p2align 8
.type _Z21global_queuing_kerneliiPiS_S_S_S_S_S_,@function
_Z21global_queuing_kerneliiPiS_S_S_S_S_S_:
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x0
s_load_b32 s4, s[0:1], 0x4c
s_mov_b32 s16, 0
s_waitcnt lgkmcnt(0)
s_ashr_i32 s5, s2, 31
s_ashr_i32 s8, s3, 31
s_add_i32 s2, s2, s5
s_add_i32 s9, s3, s8
s_xor_b32 s2, s2, s5
s_xor_b32 s9, s9, s8
v_cvt_f32_u32_e32 v1, s2
s_sub_i32 s7, 0, s2
s_xor_b32 s5, s8, s5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v1, v1
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v1, 0x4f7ffffe, v1
v_cvt_u32_f32_e32 v1, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_readfirstlane_b32 s6, v1
s_mul_i32 s7, s7, s6
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_hi_u32 s7, s6, s7
s_add_i32 s6, s6, s7
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_hi_u32 s6, s9, s6
s_mul_i32 s7, s6, s2
s_add_i32 s8, s6, 1
s_sub_i32 s7, s9, s7
s_delay_alu instid0(SALU_CYCLE_1)
s_sub_i32 s9, s7, s2
s_cmp_ge_u32 s7, s2
s_cselect_b32 s6, s8, s6
s_cselect_b32 s7, s9, s7
s_add_i32 s8, s6, 1
s_cmp_ge_u32 s7, s2
s_cselect_b32 s2, s8, s6
s_and_b32 s4, s4, 0xffff
s_xor_b32 s2, s2, s5
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
s_sub_i32 s2, s2, s5
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_mul_lo_u32 v0, v1, s2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v1, s2, v0
s_mov_b32 s2, exec_lo
v_min_i32_e32 v11, s3, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_lt_i32_e64 v0, v11
s_cbranch_execz .LBB0_34
s_clause 0x2
s_load_b256 s[4:11], s[0:1], 0x8
s_load_b128 s[12:15], s[0:1], 0x28
s_load_b64 s[2:3], s[0:1], 0x38
v_dual_mov_b32 v12, 1 :: v_dual_mov_b32 v13, 0
s_branch .LBB0_3
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s1
v_add_nc_u32_e32 v0, 1, v0
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
v_cmp_ge_i32_e32 vcc_lo, v0, v11
s_or_b32 s16, vcc_lo, s16
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s16
s_cbranch_execz .LBB0_34
.LBB0_3:
v_ashrrev_i32_e32 v1, 31, v0
s_mov_b32 s1, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s6, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v2, vcc_lo, s7, v2, vcc_lo
global_load_b32 v1, v[1:2], off
s_waitcnt vmcnt(0)
v_ashrrev_i32_e32 v2, 31, v1
v_lshlrev_b64 v[5:6], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v1, vcc_lo, s4, v5
v_add_co_ci_u32_e32 v2, vcc_lo, s5, v6, vcc_lo
global_load_b64 v[3:4], v[1:2], off
s_waitcnt vmcnt(0)
v_cmpx_lt_i32_e64 v3, v4
s_cbranch_execz .LBB0_2
v_ashrrev_i32_e32 v4, 31, v3
s_mov_b32 s17, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_lshlrev_b64 v[7:8], 2, v[3:4]
v_add_co_u32 v4, vcc_lo, s2, v5
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v6, vcc_lo
v_add_co_u32 v6, vcc_lo, s8, v7
s_delay_alu instid0(VALU_DEP_4)
v_add_co_ci_u32_e32 v7, vcc_lo, s9, v8, vcc_lo
s_branch .LBB0_7
.LBB0_5:
s_or_b32 exec_lo, exec_lo, s19
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s0, v9
s_getpc_b64 s[20:21]
s_add_u32 s20, s20, globalQueue@rel32@lo+4
s_addc_u32 s21, s21, globalQueue@rel32@hi+12
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v9, s0, v14
v_ashrrev_i32_e32 v10, 31, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[9:10], 2, v[9:10]
v_add_co_u32 v9, vcc_lo, v9, s20
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v10, vcc_lo, s21, v10, vcc_lo
global_store_b32 v[9:10], v8, off
.LBB0_6:
s_or_b32 exec_lo, exec_lo, s18
global_load_b32 v8, v[1:2], off offset:4
v_add_nc_u32_e32 v3, 1, v3
v_add_co_u32 v6, s0, v6, 4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_co_ci_u32_e64 v7, s0, 0, v7, s0
s_waitcnt vmcnt(0)
v_cmp_ge_i32_e32 vcc_lo, v3, v8
s_or_b32 s17, vcc_lo, s17
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s17
s_cbranch_execz .LBB0_2
.LBB0_7:
global_load_b32 v8, v[6:7], off
s_mov_b32 s18, exec_lo
s_waitcnt vmcnt(0)
v_ashrrev_i32_e32 v9, 31, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[9:10], 2, v[8:9]
v_add_co_u32 v14, vcc_lo, s10, v9
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v15, vcc_lo, s11, v10, vcc_lo
global_atomic_swap_b32 v14, v[14:15], v12, off glc
s_waitcnt vmcnt(0)
v_cmpx_eq_u32_e32 0, v14
s_cbranch_execz .LBB0_6
v_add_co_u32 v14, vcc_lo, s12, v9
v_add_co_ci_u32_e32 v15, vcc_lo, s13, v10, vcc_lo
v_add_co_u32 v17, vcc_lo, s14, v9
v_add_co_ci_u32_e32 v18, vcc_lo, s15, v10, vcc_lo
s_mov_b32 s0, exec_lo
global_load_b32 v16, v[14:15], off
global_load_b32 v14, v[17:18], off
global_load_b32 v15, v[4:5], off
s_waitcnt vmcnt(2)
v_cmpx_lt_i32_e32 2, v16
s_xor_b32 s0, exec_lo, s0
s_cbranch_execz .LBB0_20
s_mov_b32 s19, exec_lo
v_cmpx_lt_i32_e32 3, v16
s_xor_b32 s19, exec_lo, s19
s_cbranch_execz .LBB0_17
s_mov_b32 s21, exec_lo
v_cmpx_lt_i32_e32 4, v16
s_xor_b32 s21, exec_lo, s21
s_cbranch_execz .LBB0_14
s_mov_b32 s20, 0
s_mov_b32 s22, exec_lo
v_cmpx_eq_u32_e32 5, v16
s_cbranch_execz .LBB0_13
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, v14, v15
s_and_b32 s20, vcc_lo, exec_lo
.LBB0_13:
s_or_b32 exec_lo, exec_lo, s22
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 s20, s20, exec_lo
.LBB0_14:
s_and_not1_saveexec_b32 s21, s21
s_cbranch_execz .LBB0_16
s_waitcnt vmcnt(0)
v_cmp_ne_u32_e32 vcc_lo, v14, v15
s_and_not1_b32 s20, s20, exec_lo
s_and_b32 s22, vcc_lo, exec_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 s20, s20, s22
.LBB0_16:
s_or_b32 exec_lo, exec_lo, s21
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 s20, s20, exec_lo
.LBB0_17:
s_and_not1_saveexec_b32 s19, s19
s_cbranch_execz .LBB0_19
s_waitcnt vmcnt(0)
v_or_b32_e32 v14, v14, v15
s_and_not1_b32 s20, s20, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_eq_u32_e32 vcc_lo, 0, v14
s_and_b32 s21, vcc_lo, exec_lo
s_or_b32 s20, s20, s21
.LBB0_19:
s_or_b32 exec_lo, exec_lo, s19
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 s19, s20, exec_lo
.LBB0_20:
s_and_not1_saveexec_b32 s20, s0
s_cbranch_execz .LBB0_32
s_mov_b32 s0, exec_lo
v_cmpx_lt_i32_e32 0, v16
s_xor_b32 s21, exec_lo, s0
s_cbranch_execz .LBB0_27
s_mov_b32 s22, exec_lo
v_cmpx_lt_i32_e32 1, v16
s_xor_b32 s22, exec_lo, s22
s_cbranch_execz .LBB0_24
s_waitcnt vmcnt(1)
v_cmp_ne_u32_e32 vcc_lo, 1, v14
s_waitcnt vmcnt(0)
v_cmp_ne_u32_e64 s0, 1, v15
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_or_b32 s0, vcc_lo, s0
s_and_b32 s0, s0, exec_lo
.LBB0_24:
s_and_not1_saveexec_b32 s22, s22
s_cbranch_execz .LBB0_26
s_waitcnt vmcnt(0)
v_or_b32_e32 v14, v14, v15
s_and_not1_b32 s0, s0, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_ne_u32_e32 vcc_lo, 0, v14
s_and_b32 s23, vcc_lo, exec_lo
s_or_b32 s0, s0, s23
.LBB0_26:
s_or_b32 exec_lo, exec_lo, s22
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 s22, s0, exec_lo
.LBB0_27:
s_and_not1_saveexec_b32 s21, s21
s_cbranch_execz .LBB0_31
s_mov_b32 s0, 0
s_mov_b32 s23, exec_lo
v_cmpx_eq_u32_e32 0, v16
s_cbranch_execz .LBB0_30
s_waitcnt vmcnt(1)
v_cmp_eq_u32_e32 vcc_lo, 1, v14
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e64 s0, 1, v15
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s0, vcc_lo, s0
s_and_b32 s0, s0, exec_lo
.LBB0_30:
s_or_b32 exec_lo, exec_lo, s23
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
s_and_not1_b32 s22, s22, exec_lo
s_and_b32 s0, s0, exec_lo
s_or_b32 s22, s22, s0
.LBB0_31:
s_or_b32 exec_lo, exec_lo, s21
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
s_and_not1_b32 s0, s19, exec_lo
s_and_b32 s19, s22, exec_lo
s_or_b32 s19, s0, s19
.LBB0_32:
s_or_b32 exec_lo, exec_lo, s20
s_delay_alu instid0(SALU_CYCLE_1)
s_mov_b32 s0, exec_lo
v_add_co_u32 v9, vcc_lo, s2, v9
s_waitcnt vmcnt(0)
v_cndmask_b32_e64 v15, 0, 1, s19
v_mbcnt_lo_u32_b32 v14, s0, 0
v_add_co_ci_u32_e32 v10, vcc_lo, s3, v10, vcc_lo
s_mov_b32 s19, exec_lo
global_store_b32 v[9:10], v15, off
v_cmpx_eq_u32_e32 0, v14
s_cbranch_execz .LBB0_5
s_bcnt1_i32_b32 s0, s0
s_getpc_b64 s[20:21]
s_add_u32 s20, s20, numNextLevelNodes@rel32@lo+4
s_addc_u32 s21, s21, numNextLevelNodes@rel32@hi+12
v_mov_b32_e32 v9, s0
global_atomic_add_u32 v9, v13, v9, s[20:21] glc
s_branch .LBB0_5
.LBB0_34:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z21global_queuing_kerneliiPiS_S_S_S_S_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 320
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 19
.amdhsa_next_free_sgpr 24
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z21global_queuing_kerneliiPiS_S_S_S_S_S_, .Lfunc_end0-_Z21global_queuing_kerneliiPiS_S_S_S_S_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.protected globalQueue
.type globalQueue,@object
.section .bss,"aw",@nobits
.globl globalQueue
.p2align 4, 0x0
globalQueue:
.zero 28000000
.size globalQueue, 28000000
.protected numNextLevelNodes
.type numNextLevelNodes,@object
.globl numNextLevelNodes
.p2align 2, 0x0
numNextLevelNodes:
.long 0
.size numNextLevelNodes, 4
.type __hip_cuid_,@object
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym globalQueue
.addrsig_sym numNextLevelNodes
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 48
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 56
.size: 8
.value_kind: global_buffer
- .offset: 64
.size: 4
.value_kind: hidden_block_count_x
- .offset: 68
.size: 4
.value_kind: hidden_block_count_y
- .offset: 72
.size: 4
.value_kind: hidden_block_count_z
- .offset: 76
.size: 2
.value_kind: hidden_group_size_x
- .offset: 78
.size: 2
.value_kind: hidden_group_size_y
- .offset: 80
.size: 2
.value_kind: hidden_group_size_z
- .offset: 82
.size: 2
.value_kind: hidden_remainder_x
- .offset: 84
.size: 2
.value_kind: hidden_remainder_y
- .offset: 86
.size: 2
.value_kind: hidden_remainder_z
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 112
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 120
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 128
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 320
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z21global_queuing_kerneliiPiS_S_S_S_S_S_
.private_segment_fixed_size: 0
.sgpr_count: 26
.sgpr_spill_count: 0
.symbol: _Z21global_queuing_kerneliiPiS_S_S_S_S_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 19
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0017befd_00000000-6_global_queueing.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2063:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "r"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "Couldn't open file for reading\n"
.section .rodata.str1.1
.LC2:
.string "%d"
.text
.globl _Z23read_input_one_two_fourPPiPc
.type _Z23read_input_one_two_fourPPiPc, @function
_Z23read_input_one_two_fourPPiPc:
.LFB2057:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $24, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %r12
movq %rsi, %rdi
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
leaq .LC0(%rip), %rsi
call fopen@PLT
testq %rax, %rax
je .L11
movq %rax, %rbp
movq %rsp, %rdx
leaq .LC2(%rip), %rsi
movq %rax, %rdi
movl $0, %eax
call __isoc23_fscanf@PLT
movl (%rsp), %ebx
movslq %ebx, %rdi
salq $2, %rdi
call malloc@PLT
movq %rax, (%r12)
testl %ebx, %ebx
jle .L6
movl $0, %ebx
leaq .LC2(%rip), %r13
.L7:
leaq 4(%rsp), %rdx
movq %r13, %rsi
movq %rbp, %rdi
movl $0, %eax
call __isoc23_fscanf@PLT
movq (%r12), %rax
movl 4(%rsp), %edx
movl %edx, (%rax,%rbx,4)
addq $1, %rbx
cmpl %ebx, (%rsp)
jg .L7
.L6:
movq %rbp, %rdi
call fclose@PLT
movl (%rsp), %eax
.L3:
movq 8(%rsp), %rdx
subq %fs:40, %rdx
jne .L12
addq $24, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L3
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z23read_input_one_two_fourPPiPc, .-_Z23read_input_one_two_fourPPiPc
.section .rodata.str1.1
.LC3:
.string "%d, %d, %d, %d"
.text
.globl _Z16read_input_threePPiS0_S0_S0_Pc
.type _Z16read_input_threePPiS0_S0_S0_Pc, @function
_Z16read_input_threePPiS0_S0_S0_Pc:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $56, %rsp
.cfi_def_cfa_offset 112
movq %rdi, %r12
movq %rsi, %r13
movq %rdx, %r14
movq %rcx, %r15
movq %r8, %rdi
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
leaq .LC0(%rip), %rsi
call fopen@PLT
testq %rax, %rax
je .L21
movq %rax, %rbp
leaq 20(%rsp), %rdx
leaq .LC2(%rip), %rsi
movq %rax, %rdi
movl $0, %eax
call __isoc23_fscanf@PLT
movl 20(%rsp), %eax
movl %eax, 12(%rsp)
movslq %eax, %rbx
salq $2, %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, (%r12)
movq %rbx, %rdi
call malloc@PLT
movq %rax, 0(%r13)
movq %rbx, %rdi
call malloc@PLT
movq %rax, (%r14)
movq %rbx, %rdi
call malloc@PLT
movq %rax, (%r15)
cmpl $0, 12(%rsp)
jle .L16
movl $0, %ebx
.L17:
leaq 28(%rsp), %rcx
leaq 24(%rsp), %rdx
leaq 36(%rsp), %r9
leaq 32(%rsp), %r8
leaq .LC3(%rip), %rsi
movq %rbp, %rdi
movl $0, %eax
call __isoc23_fscanf@PLT
movq (%r12), %rax
movl 24(%rsp), %edx
movl %edx, (%rax,%rbx,4)
movq 0(%r13), %rax
movl 28(%rsp), %edx
movl %edx, (%rax,%rbx,4)
movq (%r14), %rax
movl 32(%rsp), %edx
movl %edx, (%rax,%rbx,4)
movq (%r15), %rax
movl 36(%rsp), %edx
movl %edx, (%rax,%rbx,4)
addq $1, %rbx
cmpl %ebx, 20(%rsp)
jg .L17
.L16:
movq %rbp, %rdi
call fclose@PLT
movl 20(%rsp), %eax
.L13:
movq 40(%rsp), %rdx
subq %fs:40, %rdx
jne .L22
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L21:
.cfi_restore_state
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L13
.L22:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size _Z16read_input_threePPiS0_S0_S0_Pc, .-_Z16read_input_threePPiS0_S0_S0_Pc
.section .rodata._Z12checkCudaErr9cudaErrorPKc.str1.1,"aMS",@progbits,1
.LC4:
.string "Error at runtime %s: %s\n"
.section .text._Z12checkCudaErr9cudaErrorPKc,"axG",@progbits,_Z12checkCudaErr9cudaErrorPKc,comdat
.weak _Z12checkCudaErr9cudaErrorPKc
.type _Z12checkCudaErr9cudaErrorPKc, @function
_Z12checkCudaErr9cudaErrorPKc:
.LFB2059:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movl %edi, %ebx
testl %edi, %edi
jne .L26
.L24:
movl %ebx, %eax
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L26:
.cfi_restore_state
movq %rsi, %rbp
call cudaGetErrorString@PLT
movq %rax, %r8
movq %rbp, %rcx
leaq .LC4(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L24
.cfi_endproc
.LFE2059:
.size _Z12checkCudaErr9cudaErrorPKc, .-_Z12checkCudaErr9cudaErrorPKc
.text
.globl _Z55__device_stub__Z21global_queuing_kerneliiPiS_S_S_S_S_S_iiPiS_S_S_S_S_S_
.type _Z55__device_stub__Z21global_queuing_kerneliiPiS_S_S_S_S_S_iiPiS_S_S_S_S_S_, @function
_Z55__device_stub__Z21global_queuing_kerneliiPiS_S_S_S_S_S_iiPiS_S_S_S_S_S_:
.LFB2085:
.cfi_startproc
endbr64
subq $216, %rsp
.cfi_def_cfa_offset 224
movl %edi, 60(%rsp)
movl %esi, 56(%rsp)
movq %rdx, 48(%rsp)
movq %rcx, 40(%rsp)
movq %r8, 32(%rsp)
movq %r9, 24(%rsp)
movq 224(%rsp), %rax
movq %rax, 16(%rsp)
movq 232(%rsp), %rax
movq %rax, 8(%rsp)
movq 240(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 200(%rsp)
xorl %eax, %eax
leaq 60(%rsp), %rax
movq %rax, 128(%rsp)
leaq 56(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rax
movq %rax, 144(%rsp)
leaq 40(%rsp), %rax
movq %rax, 152(%rsp)
leaq 32(%rsp), %rax
movq %rax, 160(%rsp)
leaq 24(%rsp), %rax
movq %rax, 168(%rsp)
leaq 16(%rsp), %rax
movq %rax, 176(%rsp)
leaq 8(%rsp), %rax
movq %rax, 184(%rsp)
movq %rsp, %rax
movq %rax, 192(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 200(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $216, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 232
pushq 72(%rsp)
.cfi_def_cfa_offset 240
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z21global_queuing_kerneliiPiS_S_S_S_S_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 224
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z55__device_stub__Z21global_queuing_kerneliiPiS_S_S_S_S_S_iiPiS_S_S_S_S_S_, .-_Z55__device_stub__Z21global_queuing_kerneliiPiS_S_S_S_S_S_iiPiS_S_S_S_S_S_
.globl _Z21global_queuing_kerneliiPiS_S_S_S_S_S_
.type _Z21global_queuing_kerneliiPiS_S_S_S_S_S_, @function
_Z21global_queuing_kerneliiPiS_S_S_S_S_S_:
.LFB2086:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 40(%rsp)
.cfi_def_cfa_offset 32
pushq 40(%rsp)
.cfi_def_cfa_offset 40
pushq 40(%rsp)
.cfi_def_cfa_offset 48
call _Z55__device_stub__Z21global_queuing_kerneliiPiS_S_S_S_S_S_iiPiS_S_S_S_S_S_
addq $40, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z21global_queuing_kerneliiPiS_S_S_S_S_S_, .-_Z21global_queuing_kerneliiPiS_S_S_S_S_S_
.section .rodata.str1.1
.LC5:
.string "Missing input argument(s)!\n"
.LC6:
.string "Copying"
.LC7:
.string "Synchronization"
.LC8:
.string "GPU"
.LC9:
.string "w"
.LC10:
.string "%d\n"
.LC13:
.string "Execution time: %f ms\n"
.text
.globl main
.type main, @function
main:
.LFB2060:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $248, %rsp
.cfi_def_cfa_offset 304
movq %fs:40, %rax
movq %rax, 232(%rsp)
xorl %eax, %eax
cmpl $6, %edi
jg .L36
leaq .LC5(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
call __fprintf_chk@PLT
.L35:
movq 232(%rsp), %rdx
subq %fs:40, %rdx
jne .L47
addq $248, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L36:
.cfi_restore_state
movq %rsi, %r12
movq 8(%rsi), %rsi
leaq 96(%rsp), %rdi
call _Z23read_input_one_two_fourPPiPc
movl %eax, %r15d
movq 16(%r12), %rsi
leaq 104(%rsp), %rdi
call _Z23read_input_one_two_fourPPiPc
movl %eax, %ebx
leaq 144(%rsp), %rcx
leaq 136(%rsp), %rdx
leaq 128(%rsp), %rsi
leaq 112(%rsp), %rdi
movq 24(%r12), %r8
call _Z16read_input_threePPiS0_S0_S0_Pc
movl %eax, %r13d
movq 32(%r12), %rsi
leaq 120(%rsp), %rdi
call _Z23read_input_one_two_fourPPiPc
movl %eax, %ebp
movq 40(%r12), %rax
movq %rax, 56(%rsp)
movq 48(%r12), %rax
movq %rax, 64(%rsp)
movslq %r13d, %r14
salq $2, %r14
movq %r14, %rdi
call malloc@PLT
movq %rax, %r12
movl $1, %r8d
movl $0, %ecx
movq %r14, %rdx
movq %rax, %rsi
leaq _ZL11globalQueue(%rip), %rdi
call cudaMemcpyToSymbol@PLT
movl %eax, %edi
leaq .LC6(%rip), %rsi
call _Z12checkCudaErr9cudaErrorPKc
movslq %r15d, %r15
salq $2, %r15
movq %r15, %rdi
call malloc@PLT
movq %rax, 152(%rsp)
leaq 152(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
movq 96(%rsp), %rax
movq %rax, 8(%rsp)
movl $1, %ecx
movq %r15, %rdx
movq %rax, %rsi
movq 152(%rsp), %rdi
call cudaMemcpy@PLT
movslq %ebp, %rbp
salq $2, %rbp
movq %rbp, %rdi
call malloc@PLT
movq %rax, 160(%rsp)
leaq 160(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
movq 120(%rsp), %rax
movq %rax, 16(%rsp)
movl $1, %ecx
movq %rbp, %rdx
movq %rax, %rsi
movq 160(%rsp), %rdi
call cudaMemcpy@PLT
movslq %ebx, %rbx
salq $2, %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, 168(%rsp)
leaq 168(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movq 104(%rsp), %rax
movq %rax, 24(%rsp)
movl $1, %ecx
movq %rbx, %rdx
movq %rax, %rsi
movq 168(%rsp), %rdi
call cudaMemcpy@PLT
leal 0(,%r13,4), %ebx
movslq %ebx, %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, 176(%rsp)
leaq 176(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movq 112(%rsp), %rax
movq %rax, 32(%rsp)
movl $1, %ecx
movq %rbx, %rdx
movq %rax, %rsi
movq 176(%rsp), %rdi
call cudaMemcpy@PLT
movq %rbx, %rdi
call malloc@PLT
movq %rax, 184(%rsp)
leaq 184(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movq 128(%rsp), %rax
movq %rax, 40(%rsp)
movl $1, %ecx
movq %rbx, %rdx
movq %rax, %rsi
movq 184(%rsp), %rdi
call cudaMemcpy@PLT
movq %rbx, %rdi
call malloc@PLT
movq %rax, 192(%rsp)
leaq 192(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movq 136(%rsp), %rax
movq %rax, 48(%rsp)
movl $1, %ecx
movq %rbx, %rdx
movq %rax, %rsi
movq 192(%rsp), %rdi
call cudaMemcpy@PLT
movq %rbx, %rdi
call malloc@PLT
movq %rax, 200(%rsp)
leaq 200(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movq 144(%rsp), %r15
movl $1, %ecx
movq %rbx, %rdx
movq %r15, %rsi
movq 200(%rsp), %rdi
call cudaMemcpy@PLT
call clock@PLT
movq %rax, 72(%rsp)
movl $128, 220(%rsp)
movl $1, 224(%rsp)
movl $1, 228(%rsp)
movl $35, 208(%rsp)
movl $1, 212(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 220(%rsp), %rdx
movl $1, %ecx
movq 208(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L48
.L38:
call clock@PLT
movq %rax, 80(%rsp)
call cudaDeviceSynchronize@PLT
movl %eax, %edi
leaq .LC7(%rip), %rsi
call _Z12checkCudaErr9cudaErrorPKc
call cudaGetLastError@PLT
movl %eax, %edi
leaq .LC8(%rip), %rsi
call _Z12checkCudaErr9cudaErrorPKc
leaq 220(%rsp), %rdi
movl $2, %r8d
movl $0, %ecx
movl $4, %edx
leaq _ZL17numNextLevelNodes(%rip), %rsi
call cudaMemcpyFromSymbol@PLT
movl $2, %r8d
movl $0, %ecx
movq %rbx, %rdx
leaq _ZL11globalQueue(%rip), %rsi
movq %r12, %rdi
call cudaMemcpyFromSymbol@PLT
movl %eax, %edi
leaq .LC6(%rip), %rbp
movq %rbp, %rsi
call _Z12checkCudaErr9cudaErrorPKc
movq %rbx, %rdi
call malloc@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 200(%rsp), %rsi
movq %rax, 88(%rsp)
movq %rax, %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movq %rbp, %rsi
call _Z12checkCudaErr9cudaErrorPKc
leaq .LC9(%rip), %rsi
movq 56(%rsp), %rdi
call fopen@PLT
movq %rax, %rbx
movl %r13d, %ecx
leaq .LC10(%rip), %rdx
movl $2, %esi
movq %rax, %rdi
movl $0, %eax
call __fprintf_chk@PLT
testl %r13d, %r13d
jle .L39
movq 88(%rsp), %rbp
movq %rbp, %r13
addq %r14, %rbp
leaq .LC10(%rip), %r14
.L40:
movl 0(%r13), %ecx
movq %r14, %rdx
movl $2, %esi
movq %rbx, %rdi
movl $0, %eax
call __fprintf_chk@PLT
addq $4, %r13
cmpq %rbp, %r13
jne .L40
.L39:
movq %rbx, %rdi
call fclose@PLT
leaq .LC9(%rip), %rsi
movq 64(%rsp), %rdi
call fopen@PLT
movq %rax, %rbp
movl 220(%rsp), %ecx
leaq .LC10(%rip), %rdx
movl $2, %esi
movq %rax, %rdi
movl $0, %eax
call __fprintf_chk@PLT
cmpl $0, 220(%rsp)
jle .L41
movl $0, %ebx
leaq .LC10(%rip), %r13
.L42:
movl (%r12,%rbx,4), %ecx
movq %r13, %rdx
movl $2, %esi
movq %rbp, %rdi
movl $0, %eax
call __fprintf_chk@PLT
addq $1, %rbx
cmpl %ebx, 220(%rsp)
jg .L42
.L41:
movq %rbp, %rdi
call fclose@PLT
movq 80(%rsp), %rax
movq 72(%rsp), %rdx
subq %rdx, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
divsd .LC11(%rip), %xmm0
mulsd .LC12(%rip), %xmm0
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 8(%rsp), %rdi
call free@PLT
movq 24(%rsp), %rdi
call free@PLT
movq 32(%rsp), %rdi
call free@PLT
movq 16(%rsp), %rdi
call free@PLT
movq 40(%rsp), %rdi
call free@PLT
movq 48(%rsp), %rdi
call free@PLT
movq %r15, %rdi
call free@PLT
movq 160(%rsp), %rdi
call cudaFree@PLT
movq 168(%rsp), %rdi
call cudaFree@PLT
movq 152(%rsp), %rdi
call cudaFree@PLT
movq 176(%rsp), %rdi
call cudaFree@PLT
movq 192(%rsp), %rdi
call cudaFree@PLT
movq 200(%rsp), %rdi
call cudaFree@PLT
movq 184(%rsp), %rdi
call cudaFree@PLT
movl $0, %eax
jmp .L35
.L48:
subq $8, %rsp
.cfi_def_cfa_offset 312
pushq 208(%rsp)
.cfi_def_cfa_offset 320
pushq 208(%rsp)
.cfi_def_cfa_offset 328
pushq 208(%rsp)
.cfi_def_cfa_offset 336
movq 208(%rsp), %r9
movq 200(%rsp), %r8
movq 192(%rsp), %rcx
movq 184(%rsp), %rdx
movl %r13d, %esi
movl $4480, %edi
call _Z55__device_stub__Z21global_queuing_kerneliiPiS_S_S_S_S_S_iiPiS_S_S_S_S_S_
addq $32, %rsp
.cfi_def_cfa_offset 304
jmp .L38
.L47:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2060:
.size main, .-main
.section .rodata.str1.8
.align 8
.LC14:
.string "_Z21global_queuing_kerneliiPiS_S_S_S_S_S_"
.section .rodata.str1.1
.LC15:
.string "globalQueue"
.LC16:
.string "numNextLevelNodes"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2088:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC14(%rip), %rdx
movq %rdx, %rcx
leaq _Z21global_queuing_kerneliiPiS_S_S_S_S_S_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $28000000, %r9d
movl $0, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _ZL11globalQueue(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $4, %r9d
movl $0, %r8d
leaq .LC16(%rip), %rdx
movq %rdx, %rcx
leaq _ZL17numNextLevelNodes(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL17numNextLevelNodes
.comm _ZL17numNextLevelNodes,4,4
.local _ZL11globalQueue
.comm _ZL11globalQueue,28000000,32
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC11:
.long 0
.long 1093567616
.align 8
.LC12:
.long 0
.long 1083129856
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "global_queueing.hip"
.globl _Z23read_input_one_two_fourPPiPc # -- Begin function _Z23read_input_one_two_fourPPiPc
.p2align 4, 0x90
.type _Z23read_input_one_two_fourPPiPc,@function
_Z23read_input_one_two_fourPPiPc: # @_Z23read_input_one_two_fourPPiPc
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rsi, %rax
movq %rdi, %rbx
movl $.L.str, %esi
movq %rax, %rdi
callq fopen
testq %rax, %rax
je .LBB0_5
# %bb.1:
movq %rax, %r14
movq %rsp, %rdx
movl $.L.str.2, %esi
movq %rax, %rdi
xorl %eax, %eax
callq __isoc23_fscanf
movslq (%rsp), %r15
leaq (,%r15,4), %rdi
callq malloc
movq %rax, (%rbx)
testq %r15, %r15
jle .LBB0_4
# %bb.2: # %.lr.ph.preheader
leaq 4(%rsp), %r15
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB0_3: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl $.L.str.2, %esi
movq %r14, %rdi
movq %r15, %rdx
xorl %eax, %eax
callq __isoc23_fscanf
movl 4(%rsp), %eax
movq (%rbx), %rcx
movl %eax, (%rcx,%r12,4)
incq %r12
movslq (%rsp), %rax
cmpq %rax, %r12
jl .LBB0_3
.LBB0_4: # %._crit_edge
movq %r14, %rdi
callq fclose
movl (%rsp), %eax
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB0_5:
.cfi_def_cfa_offset 48
movq stderr(%rip), %rdi
movl $.L.str.1, %esi
xorl %eax, %eax
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
jmp fprintf # TAILCALL
.Lfunc_end0:
.size _Z23read_input_one_two_fourPPiPc, .Lfunc_end0-_Z23read_input_one_two_fourPPiPc
.cfi_endproc
# -- End function
.globl _Z16read_input_threePPiS0_S0_S0_Pc # -- Begin function _Z16read_input_threePPiS0_S0_S0_Pc
.p2align 4, 0x90
.type _Z16read_input_threePPiS0_S0_S0_Pc,@function
_Z16read_input_threePPiS0_S0_S0_Pc: # @_Z16read_input_threePPiS0_S0_S0_Pc
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $24, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rcx, %rbx
movq %rdx, %r14
movq %rsi, %r15
movq %rdi, %r12
movl $.L.str, %esi
movq %r8, %rdi
callq fopen
testq %rax, %rax
je .LBB1_5
# %bb.1:
movq %rax, %r13
leaq 4(%rsp), %rdx
movl $.L.str.2, %esi
movq %rax, %rdi
xorl %eax, %eax
callq __isoc23_fscanf
movslq 4(%rsp), %rbp
shlq $2, %rbp
movq %rbp, %rdi
callq malloc
movq %rax, (%r12)
movq %rbp, %rdi
callq malloc
movq %rax, (%r15)
movq %rbp, %rdi
callq malloc
movq %rax, (%r14)
movq %rbp, %rdi
callq malloc
movq %rax, (%rbx)
cmpl $0, 4(%rsp)
jle .LBB1_4
# %bb.2: # %.lr.ph.preheader
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB1_3: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl $.L.str.3, %esi
movq %r13, %rdi
leaq 20(%rsp), %rdx
leaq 16(%rsp), %rcx
leaq 12(%rsp), %r8
leaq 8(%rsp), %r9
xorl %eax, %eax
callq __isoc23_fscanf
movl 20(%rsp), %eax
movq (%r12), %rcx
movl %eax, (%rcx,%rbp,4)
movl 16(%rsp), %eax
movq (%r15), %rcx
movl %eax, (%rcx,%rbp,4)
movl 12(%rsp), %eax
movq (%r14), %rcx
movl %eax, (%rcx,%rbp,4)
movl 8(%rsp), %eax
movq (%rbx), %rcx
movl %eax, (%rcx,%rbp,4)
incq %rbp
movslq 4(%rsp), %rax
cmpq %rax, %rbp
jl .LBB1_3
.LBB1_4: # %._crit_edge
movq %r13, %rdi
callq fclose
movl 4(%rsp), %eax
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_5:
.cfi_def_cfa_offset 80
movq stderr(%rip), %rdi
movl $.L.str.1, %esi
xorl %eax, %eax
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
jmp fprintf # TAILCALL
.Lfunc_end1:
.size _Z16read_input_threePPiS0_S0_S0_Pc, .Lfunc_end1-_Z16read_input_threePPiS0_S0_S0_Pc
.cfi_endproc
# -- End function
.globl _Z36__device_stub__global_queuing_kerneliiPiS_S_S_S_S_S_ # -- Begin function _Z36__device_stub__global_queuing_kerneliiPiS_S_S_S_S_S_
.p2align 4, 0x90
.type _Z36__device_stub__global_queuing_kerneliiPiS_S_S_S_S_S_,@function
_Z36__device_stub__global_queuing_kerneliiPiS_S_S_S_S_S_: # @_Z36__device_stub__global_queuing_kerneliiPiS_S_S_S_S_S_
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movq %rdx, 88(%rsp)
movq %rcx, 80(%rsp)
movq %r8, 72(%rsp)
movq %r9, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 88(%rsp), %rax
movq %rax, 112(%rsp)
leaq 80(%rsp), %rax
movq %rax, 120(%rsp)
leaq 72(%rsp), %rax
movq %rax, 128(%rsp)
leaq 64(%rsp), %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
leaq 184(%rsp), %rax
movq %rax, 152(%rsp)
leaq 192(%rsp), %rax
movq %rax, 160(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z21global_queuing_kerneliiPiS_S_S_S_S_S_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end2:
.size _Z36__device_stub__global_queuing_kerneliiPiS_S_S_S_S_S_, .Lfunc_end2-_Z36__device_stub__global_queuing_kerneliiPiS_S_S_S_S_S_
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI3_0:
.quad 0x412e848000000000 # double 1.0E+6
.LCPI3_1:
.quad 0x408f400000000000 # double 1000
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
cmpl $6, %edi
jle .LBB3_20
# %bb.1:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $328, %rsp # imm = 0x148
.cfi_def_cfa_offset 384
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
movq 8(%rsi), %rsi
leaq 120(%rsp), %rdi
callq _Z23read_input_one_two_fourPPiPc
movl %eax, %r12d
movq 16(%rbx), %rsi
leaq 112(%rsp), %rdi
callq _Z23read_input_one_two_fourPPiPc
movl %eax, %r15d
movq 24(%rbx), %r8
leaq 104(%rsp), %rdi
leaq 88(%rsp), %rsi
leaq 80(%rsp), %rdx
leaq 72(%rsp), %rcx
callq _Z16read_input_threePPiS0_S0_S0_Pc
movl %eax, %ebp
movq 32(%rbx), %rsi
leaq 96(%rsp), %rdi
callq _Z23read_input_one_two_fourPPiPc
movl %eax, %r13d
movq 40(%rbx), %rax
movq %rax, 136(%rsp) # 8-byte Spill
movq 48(%rbx), %rax
movq %rax, 144(%rsp) # 8-byte Spill
movl %ebp, 60(%rsp) # 4-byte Spill
movslq %ebp, %r14
leaq (,%r14,4), %rbp
movq %rbp, %rdi
callq malloc
movq %rax, %rbx
movl $globalQueue, %edi
movq %rax, %rsi
movq %rbp, %rdx
xorl %ecx, %ecx
movl $1, %r8d
callq hipMemcpyToSymbol
testl %eax, %eax
jne .LBB3_2
.LBB3_3: # %_Z12checkCudaErr10hipError_tPKc.exit
movslq %r12d, %r12
shlq $2, %r12
movq %r12, %rdi
callq malloc
movq %rax, 48(%rsp)
leaq 48(%rsp), %rdi
movq %r12, %rsi
callq hipMalloc
movq 48(%rsp), %rdi
movq 120(%rsp), %rsi
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
movslq %r13d, %r12
shlq $2, %r12
movq %r12, %rdi
callq malloc
movq %rax, 40(%rsp)
leaq 40(%rsp), %rdi
movq %r12, %rsi
callq hipMalloc
movq 40(%rsp), %rdi
movq 96(%rsp), %rsi
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
movslq %r15d, %r15
shlq $2, %r15
movq %r15, %rdi
callq malloc
movq %rax, 32(%rsp)
leaq 32(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
movq 32(%rsp), %rdi
movq 112(%rsp), %rsi
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
movq %r14, %r15
shlq $34, %r15
sarq $32, %r15
movq %r15, %rdi
callq malloc
movq %rax, 24(%rsp)
leaq 24(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
movq 24(%rsp), %rdi
movq 104(%rsp), %rsi
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
movq %r15, %rdi
callq malloc
movq %rax, 16(%rsp)
leaq 16(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movq 88(%rsp), %rsi
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
movq %r15, %rdi
callq malloc
movq %rax, 8(%rsp)
leaq 8(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
movq 8(%rsp), %rdi
movq 80(%rsp), %rsi
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
movq %r15, %rdi
callq malloc
movq %rax, (%rsp)
movq %rsp, %rdi
movq %r15, %rsi
callq hipMalloc
movq (%rsp), %rdi
movq 72(%rsp), %rsi
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
callq clock
movq %rax, 128(%rsp) # 8-byte Spill
movabsq $4294967331, %rdi # imm = 0x100000023
leaq 93(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_5
# %bb.4:
movq 48(%rsp), %rax
movq 40(%rsp), %rcx
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
movq 8(%rsp), %r8
movq (%rsp), %r9
movl $4480, 68(%rsp) # imm = 0x1180
movl %r14d, 64(%rsp)
movq %rax, 248(%rsp)
movq %rcx, 240(%rsp)
movq %rdx, 232(%rsp)
movq %rsi, 224(%rsp)
movq %rdi, 216(%rsp)
movq %r8, 208(%rsp)
movq %r9, 200(%rsp)
leaq 68(%rsp), %rax
movq %rax, 256(%rsp)
leaq 64(%rsp), %rax
movq %rax, 264(%rsp)
leaq 248(%rsp), %rax
movq %rax, 272(%rsp)
leaq 240(%rsp), %rax
movq %rax, 280(%rsp)
leaq 232(%rsp), %rax
movq %rax, 288(%rsp)
leaq 224(%rsp), %rax
movq %rax, 296(%rsp)
leaq 216(%rsp), %rax
movq %rax, 304(%rsp)
leaq 208(%rsp), %rax
movq %rax, 312(%rsp)
leaq 200(%rsp), %rax
movq %rax, 320(%rsp)
leaq 184(%rsp), %rdi
leaq 168(%rsp), %rsi
leaq 160(%rsp), %rdx
leaq 152(%rsp), %rcx
callq __hipPopCallConfiguration
movq 184(%rsp), %rsi
movl 192(%rsp), %edx
movq 168(%rsp), %rcx
movl 176(%rsp), %r8d
leaq 256(%rsp), %r9
movl $_Z21global_queuing_kerneliiPiS_S_S_S_S_S_, %edi
pushq 152(%rsp)
.cfi_adjust_cfa_offset 8
pushq 168(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_5:
callq clock
movq %rax, %r12
callq hipDeviceSynchronize
testl %eax, %eax
jne .LBB3_6
.LBB3_7: # %_Z12checkCudaErr10hipError_tPKc.exit67
callq hipGetLastError
testl %eax, %eax
jne .LBB3_8
.LBB3_9: # %_Z12checkCudaErr10hipError_tPKc.exit69
leaq 256(%rsp), %rdi
movl $numNextLevelNodes, %esi
movl $4, %edx
xorl %ecx, %ecx
movl $2, %r8d
callq hipMemcpyFromSymbol
movl $globalQueue, %esi
movq %rbx, %rdi
movq %r15, %rdx
xorl %ecx, %ecx
movl $2, %r8d
callq hipMemcpyFromSymbol
testl %eax, %eax
jne .LBB3_10
.LBB3_11: # %_Z12checkCudaErr10hipError_tPKc.exit71
movq %r15, %rdi
callq malloc
movq %rax, %r13
movq (%rsp), %rsi
movq %rax, %rdi
movq %r15, %rdx
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB3_12
.LBB3_13: # %_Z12checkCudaErr10hipError_tPKc.exit73
movl $.L.str.8, %esi
movq 136(%rsp), %rdi # 8-byte Reload
callq fopen
movq %rax, %r15
movl $.L.str.9, %esi
movq %rax, %rdi
movl %r14d, %edx
xorl %eax, %eax
callq fprintf
movl 60(%rsp), %eax # 4-byte Reload
testl %eax, %eax
jle .LBB3_16
# %bb.14: # %.lr.ph.preheader
movl %eax, %r14d
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB3_15: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl (%r13,%rbp,4), %edx
movl $.L.str.9, %esi
movq %r15, %rdi
xorl %eax, %eax
callq fprintf
incq %rbp
cmpq %rbp, %r14
jne .LBB3_15
.LBB3_16: # %._crit_edge
movq %r15, %rdi
callq fclose
movl $.L.str.8, %esi
movq 144(%rsp), %rdi # 8-byte Reload
callq fopen
movq %rax, %r14
movl 256(%rsp), %edx
movl $.L.str.9, %esi
movq %rax, %rdi
xorl %eax, %eax
callq fprintf
cmpl $0, 256(%rsp)
jle .LBB3_19
# %bb.17: # %.lr.ph80.preheader
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_18: # %.lr.ph80
# =>This Inner Loop Header: Depth=1
movl (%rbx,%r15,4), %edx
movl $.L.str.9, %esi
movq %r14, %rdi
xorl %eax, %eax
callq fprintf
incq %r15
movslq 256(%rsp), %rax
cmpq %rax, %r15
jl .LBB3_18
.LBB3_19: # %._crit_edge81
movq %r14, %rdi
callq fclose
subq 128(%rsp), %r12 # 8-byte Folded Reload
cvtsi2sd %r12, %xmm0
divsd .LCPI3_0(%rip), %xmm0
mulsd .LCPI3_1(%rip), %xmm0
movl $.L.str.10, %edi
movb $1, %al
callq printf
movq 120(%rsp), %rdi
callq free
movq 112(%rsp), %rdi
callq free
movq 104(%rsp), %rdi
callq free
movq 96(%rsp), %rdi
callq free
movq 88(%rsp), %rdi
callq free
movq 80(%rsp), %rdi
callq free
movq 72(%rsp), %rdi
callq free
movq 40(%rsp), %rdi
callq hipFree
movq 32(%rsp), %rdi
callq hipFree
movq 48(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $328, %rsp # imm = 0x148
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB3_20:
.cfi_restore %rbx
.cfi_restore %rbp
.cfi_restore %r12
.cfi_restore %r13
.cfi_restore %r14
.cfi_restore %r15
movq stderr(%rip), %rdi
movl $.L.str.4, %esi
xorl %eax, %eax
jmp fprintf # TAILCALL
.LBB3_2:
.cfi_def_cfa_offset 384
.cfi_offset %rbx, -56
.cfi_offset %rbp, -16
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movq stderr(%rip), %rbp
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.11, %esi
movl $.L.str.5, %edx
movq %rbp, %rdi
movq %rax, %rcx
xorl %eax, %eax
callq fprintf
jmp .LBB3_3
.LBB3_6:
movq stderr(%rip), %r13
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.11, %esi
movl $.L.str.6, %edx
movq %r13, %rdi
movq %rax, %rcx
xorl %eax, %eax
callq fprintf
jmp .LBB3_7
.LBB3_8:
movq stderr(%rip), %r13
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.11, %esi
movl $.L.str.7, %edx
movq %r13, %rdi
movq %rax, %rcx
xorl %eax, %eax
callq fprintf
jmp .LBB3_9
.LBB3_10:
movq stderr(%rip), %r13
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.11, %esi
movl $.L.str.5, %edx
movq %r13, %rdi
movq %rax, %rcx
xorl %eax, %eax
callq fprintf
jmp .LBB3_11
.LBB3_12:
movq stderr(%rip), %r15
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.11, %esi
movl $.L.str.5, %edx
movq %r15, %rdi
movq %rax, %rcx
xorl %eax, %eax
callq fprintf
jmp .LBB3_13
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
subq $32, %rsp
.cfi_adjust_cfa_offset 32
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z21global_queuing_kerneliiPiS_S_S_S_S_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
addq $32, %rsp
.cfi_adjust_cfa_offset -32
movl $globalQueue, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movl $28000000, %r9d # imm = 0x1AB3F00
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $numNextLevelNodes, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movl $4, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $__hip_module_dtor, %edi
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "r"
.size .L.str, 2
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Couldn't open file for reading\n"
.size .L.str.1, 32
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "%d"
.size .L.str.2, 3
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "%d, %d, %d, %d"
.size .L.str.3, 15
.type globalQueue,@object # @globalQueue
.local globalQueue
.comm globalQueue,28000000,16
.type numNextLevelNodes,@object # @numNextLevelNodes
.local numNextLevelNodes
.comm numNextLevelNodes,4,4
.type _Z21global_queuing_kerneliiPiS_S_S_S_S_S_,@object # @_Z21global_queuing_kerneliiPiS_S_S_S_S_S_
.section .rodata,"a",@progbits
.globl _Z21global_queuing_kerneliiPiS_S_S_S_S_S_
.p2align 3, 0x0
_Z21global_queuing_kerneliiPiS_S_S_S_S_S_:
.quad _Z36__device_stub__global_queuing_kerneliiPiS_S_S_S_S_S_
.size _Z21global_queuing_kerneliiPiS_S_S_S_S_S_, 8
.type .L.str.4,@object # @.str.4
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.4:
.asciz "Missing input argument(s)!\n"
.size .L.str.4, 28
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Copying"
.size .L.str.5, 8
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "Synchronization"
.size .L.str.6, 16
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "GPU"
.size .L.str.7, 4
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "w"
.size .L.str.8, 2
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "%d\n"
.size .L.str.9, 4
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "Execution time: %f ms\n"
.size .L.str.10, 23
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "Error at runtime %s: %s\n"
.size .L.str.11, 25
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z21global_queuing_kerneliiPiS_S_S_S_S_S_"
.size .L__unnamed_1, 42
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "globalQueue"
.size .L__unnamed_2, 12
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "numNextLevelNodes"
.size .L__unnamed_3, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z36__device_stub__global_queuing_kerneliiPiS_S_S_S_S_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym globalQueue
.addrsig_sym numNextLevelNodes
.addrsig_sym _Z21global_queuing_kerneliiPiS_S_S_S_S_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <iostream>
using namespace std;
__global__ void Dot(int* d_a, int* d_b, int* d_c, int size)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id<size)
d_c[id]=d_a[id]*d_b[id];
}
__global__ void Add(int* d_c, int* d_out, int size)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
int t_id = threadIdx.x;
int b_id = blockIdx.x;
__shared__ int a[1024];
if(id < size)
a[t_id] = d_c[id];
__syncthreads();
for(int s = 512; s>0; s = s/2)
{
__syncthreads();
if(id>=size || id+s>=size)
continue;
if(t_id<s)
{
a[t_id]+=a[t_id + s];
}
}
__syncthreads();
if(t_id==0)
d_out[b_id] = a[t_id];
}
int main()
{
int size;
cout<<"Enter size : ";
cin>>size;
int h_a[size], h_b[size], h_ans;
int bytes=size*sizeof(int);
int length=(int)ceil(1.0*size/1024);
for(int i=0;i<size;i++)
{
h_a[i]=rand()%10;
h_b[i]=rand()%10;
}
int *d_a, *d_b, *d_c, *d_out, *d_ans;
cudaMalloc((void**)&d_a, bytes);
cudaMalloc((void**)&d_b, bytes);
cudaMalloc((void**)&d_c, bytes);
cudaMalloc((void**)&d_out, bytes);
cudaMalloc((void**)&d_ans, sizeof(int));
cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
Dot<<<((int)ceil(1.0*size/1024)), 1024>>>(d_a, d_b, d_c, size);
Add<<<((int)ceil(1.0*size/1024)), 1024>>>(d_c, d_out, size);
Add<<<1, 1024>>>(d_out, d_ans, length);
cudaMemcpy(&h_ans, d_ans, sizeof(int), cudaMemcpyDeviceToHost);
int res=0;
for(int i=0;i<size;i++)
{
res+=(h_a[i]*h_b[i]);
}
if(h_ans==res)
cout<<"Correct result";
else
cout<<"Invalid";
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaFree(d_out);
cudaFree(d_ans);
} | code for sm_80
Function : _Z3AddPiS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0030*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0040*/ IMAD R2, R0, c[0x0][0x0], R3 ; /* 0x0000000000027a24 */
/* 0x001fca00078e0203 */
/*0050*/ ISETP.GE.AND P2, PT, R2, c[0x0][0x170], PT ; /* 0x00005c0002007a0c */
/* 0x000fda0003f46270 */
/*0060*/ @!P2 IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff05a424 */
/* 0x000fc800078e00ff */
/*0070*/ @!P2 IMAD.WIDE R4, R2, R5, c[0x0][0x160] ; /* 0x000058000204a625 */
/* 0x000fcc00078e0205 */
/*0080*/ @!P2 LDG.E R4, [R4.64] ; /* 0x000000040404a981 */
/* 0x000ea2000c1e1900 */
/*0090*/ ISETP.GE.AND P0, PT, R2.reuse, c[0x0][0x170], PT ; /* 0x00005c0002007a0c */
/* 0x040fe40003f06270 */
/*00a0*/ IADD3 R6, R2.reuse, 0x200, RZ ; /* 0x0000020002067810 */
/* 0x040fe40007ffe0ff */
/*00b0*/ IADD3 R8, R2, 0x100, RZ ; /* 0x0000010002087810 */
/* 0x000fe40007ffe0ff */
/*00c0*/ ISETP.GE.OR P1, PT, R6, c[0x0][0x170], P0 ; /* 0x00005c0006007a0c */
/* 0x000fc80000726670 */
/*00d0*/ ISETP.GT.OR P1, PT, R3, 0x1ff, P1 ; /* 0x000001ff0300780c */
/* 0x000fe20000f24670 */
/*00e0*/ @!P2 STS [R3.X4], R4 ; /* 0x000000040300a388 */
/* 0x004fe80000004800 */
/*00f0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0100*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0110*/ ISETP.GE.OR P2, PT, R8, c[0x0][0x170], P0 ; /* 0x00005c0008007a0c */
/* 0x000fc80000746670 */
/*0120*/ ISETP.GT.OR P2, PT, R3, 0xff, P2 ; /* 0x000000ff0300780c */
/* 0x000fe20001744670 */
/*0130*/ @!P1 LDS R6, [R3.X4] ; /* 0x0000000003069984 */
/* 0x000fe80000004800 */
/*0140*/ @!P1 LDS R7, [R3.X4+0x800] ; /* 0x0008000003079984 */
/* 0x000e240000004800 */
/*0150*/ @!P1 IMAD.IADD R6, R6, 0x1, R7 ; /* 0x0000000106069824 */
/* 0x001fe200078e0207 */
/*0160*/ IADD3 R7, R2, 0x80, RZ ; /* 0x0000008002077810 */
/* 0x000fc80007ffe0ff */
/*0170*/ @!P1 STS [R3.X4], R6 ; /* 0x0000000603009388 */
/* 0x000fe80000004800 */
/*0180*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0190*/ ISETP.GE.OR P1, PT, R7, c[0x0][0x170], P0 ; /* 0x00005c0007007a0c */
/* 0x000fe40000726670 */
/*01a0*/ IADD3 R7, R2, 0x40, RZ ; /* 0x0000004002077810 */
/* 0x000fe40007ffe0ff */
/*01b0*/ ISETP.GT.OR P1, PT, R3, 0x7f, P1 ; /* 0x0000007f0300780c */
/* 0x000fe20000f24670 */
/*01c0*/ @!P2 LDS R4, [R3.X4] ; /* 0x000000000304a984 */
/* 0x000fe80000004800 */
/*01d0*/ @!P2 LDS R5, [R3.X4+0x400] ; /* 0x000400000305a984 */
/* 0x000e240000004800 */
/*01e0*/ @!P2 IMAD.IADD R4, R4, 0x1, R5 ; /* 0x000000010404a824 */
/* 0x001fca00078e0205 */
/*01f0*/ @!P2 STS [R3.X4], R4 ; /* 0x000000040300a388 */
/* 0x000fe80000004800 */
/*0200*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0210*/ ISETP.GE.OR P2, PT, R7, c[0x0][0x170], P0 ; /* 0x00005c0007007a0c */
/* 0x000fe40000746670 */
/*0220*/ IADD3 R7, R2, 0x20, RZ ; /* 0x0000002002077810 */
/* 0x000fe40007ffe0ff */
/*0230*/ ISETP.GT.OR P2, PT, R3, 0x3f, P2 ; /* 0x0000003f0300780c */
/* 0x000fe20001744670 */
/*0240*/ @!P1 LDS R5, [R3.X4] ; /* 0x0000000003059984 */
/* 0x000fe80000004800 */
/*0250*/ @!P1 LDS R6, [R3.X4+0x200] ; /* 0x0002000003069984 */
/* 0x000e240000004800 */
/*0260*/ @!P1 IMAD.IADD R6, R5, 0x1, R6 ; /* 0x0000000105069824 */
/* 0x001fca00078e0206 */
/*0270*/ @!P1 STS [R3.X4], R6 ; /* 0x0000000603009388 */
/* 0x000fe80000004800 */
/*0280*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0290*/ ISETP.GE.OR P1, PT, R7, c[0x0][0x170], P0 ; /* 0x00005c0007007a0c */
/* 0x000fe40000726670 */
/*02a0*/ IADD3 R7, R2, 0x10, RZ ; /* 0x0000001002077810 */
/* 0x000fe40007ffe0ff */
/*02b0*/ ISETP.GT.OR P1, PT, R3, 0x1f, P1 ; /* 0x0000001f0300780c */
/* 0x000fe20000f24670 */
/*02c0*/ @!P2 LDS R5, [R3.X4] ; /* 0x000000000305a984 */
/* 0x000fe80000004800 */
/*02d0*/ @!P2 LDS R4, [R3.X4+0x100] ; /* 0x000100000304a984 */
/* 0x000e240000004800 */
/*02e0*/ @!P2 IMAD.IADD R4, R5, 0x1, R4 ; /* 0x000000010504a824 */
/* 0x001fca00078e0204 */
/*02f0*/ @!P2 STS [R3.X4], R4 ; /* 0x000000040300a388 */
/* 0x000fe80000004800 */
/*0300*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0310*/ ISETP.GE.OR P2, PT, R7, c[0x0][0x170], P0 ; /* 0x00005c0007007a0c */
/* 0x000fe40000746670 */
/*0320*/ IADD3 R7, R2, 0x8, RZ ; /* 0x0000000802077810 */
/* 0x000fe40007ffe0ff */
/*0330*/ ISETP.GT.OR P2, PT, R3, 0xf, P2 ; /* 0x0000000f0300780c */
/* 0x000fe20001744670 */
/*0340*/ @!P1 LDS R5, [R3.X4] ; /* 0x0000000003059984 */
/* 0x000fe80000004800 */
/*0350*/ @!P1 LDS R6, [R3.X4+0x80] ; /* 0x0000800003069984 */
/* 0x000e240000004800 */
/*0360*/ @!P1 IMAD.IADD R6, R5, 0x1, R6 ; /* 0x0000000105069824 */
/* 0x001fca00078e0206 */
/*0370*/ @!P1 STS [R3.X4], R6 ; /* 0x0000000603009388 */
/* 0x000fe80000004800 */
/*0380*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0390*/ ISETP.GE.OR P1, PT, R7, c[0x0][0x170], P0 ; /* 0x00005c0007007a0c */
/* 0x000fe40000726670 */
/*03a0*/ IADD3 R7, R2, 0x4, RZ ; /* 0x0000000402077810 */
/* 0x000fe40007ffe0ff */
/*03b0*/ ISETP.GT.OR P1, PT, R3, 0x7, P1 ; /* 0x000000070300780c */
/* 0x000fe20000f24670 */
/*03c0*/ @!P2 LDS R5, [R3.X4] ; /* 0x000000000305a984 */
/* 0x000fe80000004800 */
/*03d0*/ @!P2 LDS R4, [R3.X4+0x40] ; /* 0x000040000304a984 */
/* 0x000e240000004800 */
/*03e0*/ @!P2 IMAD.IADD R4, R5, 0x1, R4 ; /* 0x000000010504a824 */
/* 0x001fca00078e0204 */
/*03f0*/ @!P2 STS [R3.X4], R4 ; /* 0x000000040300a388 */
/* 0x000fe80000004800 */
/*0400*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0410*/ ISETP.GE.OR P2, PT, R7, c[0x0][0x170], P0 ; /* 0x00005c0007007a0c */
/* 0x000fe40000746670 */
/*0420*/ IADD3 R7, R2.reuse, 0x2, RZ ; /* 0x0000000202077810 */
/* 0x040fe40007ffe0ff */
/*0430*/ ISETP.GT.OR P2, PT, R3, 0x3, P2 ; /* 0x000000030300780c */
/* 0x000fe40001744670 */
/*0440*/ IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102027810 */
/* 0x000fe20007ffe0ff */
/*0450*/ @!P1 LDS R5, [R3.X4] ; /* 0x0000000003059984 */
/* 0x000fe80000004800 */
/*0460*/ @!P1 LDS R6, [R3.X4+0x20] ; /* 0x0000200003069984 */
/* 0x000e240000004800 */
/*0470*/ @!P1 IMAD.IADD R6, R5, 0x1, R6 ; /* 0x0000000105069824 */
/* 0x001fca00078e0206 */
/*0480*/ @!P1 STS [R3.X4], R6 ; /* 0x0000000603009388 */
/* 0x000fe80000004800 */
/*0490*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*04a0*/ ISETP.GE.OR P1, PT, R7, c[0x0][0x170], P0 ; /* 0x00005c0007007a0c */
/* 0x000fe40000726670 */
/*04b0*/ ISETP.GE.OR P0, PT, R2, c[0x0][0x170], P0 ; /* 0x00005c0002007a0c */
/* 0x000fe40000706670 */
/*04c0*/ ISETP.GT.OR P1, PT, R3.reuse, 0x1, P1 ; /* 0x000000010300780c */
/* 0x040fe40000f24670 */
/*04d0*/ ISETP.GT.OR P0, PT, R3, RZ, P0 ; /* 0x000000ff0300720c */
/* 0x000fe20000704670 */
/*04e0*/ @!P2 LDS R5, [R3.X4] ; /* 0x000000000305a984 */
/* 0x000fe80000004800 */
/*04f0*/ @!P2 LDS R4, [R3.X4+0x10] ; /* 0x000010000304a984 */
/* 0x000e240000004800 */
/*0500*/ @!P2 IMAD.IADD R4, R5, 0x1, R4 ; /* 0x000000010504a824 */
/* 0x001fca00078e0204 */
/*0510*/ @!P2 STS [R3.X4], R4 ; /* 0x000000040300a388 */
/* 0x000fe80000004800 */
/*0520*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0530*/ @!P1 LDS R5, [R3.X4] ; /* 0x0000000003059984 */
/* 0x000fe80000004800 */
/*0540*/ @!P1 LDS R6, [R3.X4+0x8] ; /* 0x0000080003069984 */
/* 0x000e240000004800 */
/*0550*/ @!P1 IMAD.IADD R6, R5, 0x1, R6 ; /* 0x0000000105069824 */
/* 0x001fca00078e0206 */
/*0560*/ @!P1 STS [R3.X4], R6 ; /* 0x0000000603009388 */
/* 0x000fe80000004800 */
/*0570*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0580*/ ISETP.NE.AND P1, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fca0003f25270 */
/*0590*/ @!P0 LDS R2, [R3.X4] ; /* 0x0000000003028984 */
/* 0x000fe80000004800 */
/*05a0*/ @!P0 LDS R5, [R3.X4+0x4] ; /* 0x0000040003058984 */
/* 0x000e240000004800 */
/*05b0*/ @!P0 IMAD.IADD R2, R2, 0x1, R5 ; /* 0x0000000102028824 */
/* 0x001fca00078e0205 */
/*05c0*/ @!P0 STS [R3.X4], R2 ; /* 0x0000000203008388 */
/* 0x0001e80000004800 */
/*05d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*05e0*/ @P1 EXIT ; /* 0x000000000000194d */
/* 0x000fea0003800000 */
/*05f0*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */
/* 0x001e220000000800 */
/*0600*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fc800078e00ff */
/*0610*/ IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fca00078e0203 */
/*0620*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101904 */
/*0630*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0640*/ BRA 0x640; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0650*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0660*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0670*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0680*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0690*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z3DotPiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0207 */
/*00d0*/ IMAD R9, R4, R3, RZ ; /* 0x0000000304097224 */
/* 0x004fca00078e02ff */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <iostream>
using namespace std;
__global__ void Dot(int* d_a, int* d_b, int* d_c, int size)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id<size)
d_c[id]=d_a[id]*d_b[id];
}
__global__ void Add(int* d_c, int* d_out, int size)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
int t_id = threadIdx.x;
int b_id = blockIdx.x;
__shared__ int a[1024];
if(id < size)
a[t_id] = d_c[id];
__syncthreads();
for(int s = 512; s>0; s = s/2)
{
__syncthreads();
if(id>=size || id+s>=size)
continue;
if(t_id<s)
{
a[t_id]+=a[t_id + s];
}
}
__syncthreads();
if(t_id==0)
d_out[b_id] = a[t_id];
}
int main()
{
int size;
cout<<"Enter size : ";
cin>>size;
int h_a[size], h_b[size], h_ans;
int bytes=size*sizeof(int);
int length=(int)ceil(1.0*size/1024);
for(int i=0;i<size;i++)
{
h_a[i]=rand()%10;
h_b[i]=rand()%10;
}
int *d_a, *d_b, *d_c, *d_out, *d_ans;
cudaMalloc((void**)&d_a, bytes);
cudaMalloc((void**)&d_b, bytes);
cudaMalloc((void**)&d_c, bytes);
cudaMalloc((void**)&d_out, bytes);
cudaMalloc((void**)&d_ans, sizeof(int));
cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
Dot<<<((int)ceil(1.0*size/1024)), 1024>>>(d_a, d_b, d_c, size);
Add<<<((int)ceil(1.0*size/1024)), 1024>>>(d_c, d_out, size);
Add<<<1, 1024>>>(d_out, d_ans, length);
cudaMemcpy(&h_ans, d_ans, sizeof(int), cudaMemcpyDeviceToHost);
int res=0;
for(int i=0;i<size;i++)
{
res+=(h_a[i]*h_b[i]);
}
if(h_ans==res)
cout<<"Correct result";
else
cout<<"Invalid";
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaFree(d_out);
cudaFree(d_ans);
} | .file "tmpxft_001a1487_00000000-6_Resultant_Sum_of_dot_product.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z27__device_stub__Z3DotPiS_S_iPiS_S_i
.type _Z27__device_stub__Z3DotPiS_S_iPiS_S_i, @function
_Z27__device_stub__Z3DotPiS_S_iPiS_S_i:
.LFB3694:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3DotPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z27__device_stub__Z3DotPiS_S_iPiS_S_i, .-_Z27__device_stub__Z3DotPiS_S_iPiS_S_i
.globl _Z3DotPiS_S_i
.type _Z3DotPiS_S_i, @function
_Z3DotPiS_S_i:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z3DotPiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z3DotPiS_S_i, .-_Z3DotPiS_S_i
.globl _Z25__device_stub__Z3AddPiS_iPiS_i
.type _Z25__device_stub__Z3AddPiS_iPiS_i, @function
_Z25__device_stub__Z3AddPiS_iPiS_i:
.LFB3696:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3AddPiS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3696:
.size _Z25__device_stub__Z3AddPiS_iPiS_i, .-_Z25__device_stub__Z3AddPiS_iPiS_i
.globl _Z3AddPiS_i
.type _Z3AddPiS_i, @function
_Z3AddPiS_i:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z25__device_stub__Z3AddPiS_iPiS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _Z3AddPiS_i, .-_Z3AddPiS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Enter size : "
.LC5:
.string "Correct result"
.LC6:
.string "Invalid"
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
movq %rsp, %rbp
.cfi_def_cfa_register 6
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $88, %rsp
.cfi_offset 15, -24
.cfi_offset 14, -32
.cfi_offset 13, -40
.cfi_offset 12, -48
.cfi_offset 3, -56
movq %fs:40, %rax
movq %rax, -56(%rbp)
xorl %eax, %eax
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
leaq -124(%rbp), %rsi
leaq _ZSt3cin(%rip), %rdi
call _ZNSirsERi@PLT
movl -124(%rbp), %r15d
movslq %r15d, %rax
salq $2, %rax
leaq 15(%rax), %rdx
movq %rdx, %rsi
andq $-16, %rsi
andq $-4096, %rdx
movq %rsp, %rcx
subq %rdx, %rcx
.L20:
cmpq %rcx, %rsp
je .L21
subq $4096, %rsp
orq $0, 4088(%rsp)
jmp .L20
.L21:
movq %rsi, %rdx
andl $4095, %edx
subq %rdx, %rsp
testq %rdx, %rdx
je .L22
orq $0, -8(%rsp,%rdx)
.L22:
movq %rsp, %r12
addq $15, %rax
movq %rax, %rcx
andq $-16, %rcx
andq $-4096, %rax
movq %rsp, %rdx
subq %rax, %rdx
.L23:
cmpq %rdx, %rsp
je .L24
subq $4096, %rsp
orq $0, 4088(%rsp)
jmp .L23
.L24:
movq %rcx, %rax
andl $4095, %eax
subq %rax, %rsp
testq %rax, %rax
je .L25
orq $0, -8(%rsp,%rax)
.L25:
movq %rsp, %r13
leal 0(,%r15,4), %r14d
testl %r15d, %r15d
jle .L26
movl $0, %ebx
.L27:
call rand@PLT
movslq %eax, %rdx
imulq $1717986919, %rdx, %rdx
sarq $34, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,4), %edx
addl %edx, %edx
subl %edx, %eax
movl %eax, (%r12,%rbx,4)
call rand@PLT
movslq %eax, %rdx
imulq $1717986919, %rdx, %rdx
sarq $34, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,4), %edx
addl %edx, %edx
subl %edx, %eax
movl %eax, 0(%r13,%rbx,4)
addq $1, %rbx
cmpl %ebx, -124(%rbp)
jg .L27
.L26:
movslq %r14d, %r14
leaq -120(%rbp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
leaq -112(%rbp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
leaq -104(%rbp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
leaq -96(%rbp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
leaq -88(%rbp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movl $1, %ecx
movq %r14, %rdx
movq %r12, %rsi
movq -120(%rbp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %r14, %rdx
movq %r13, %rsi
movq -112(%rbp), %rdi
call cudaMemcpy@PLT
movl $1024, -68(%rbp)
movl $1, -64(%rbp)
movl $1, -60(%rbp)
pxor %xmm0, %xmm0
cvtsi2sdl -124(%rbp), %xmm0
mulsd .LC1(%rip), %xmm0
movapd %xmm0, %xmm3
movsd .LC7(%rip), %xmm2
movapd %xmm0, %xmm1
andpd %xmm2, %xmm1
movsd .LC2(%rip), %xmm4
ucomisd %xmm1, %xmm4
jbe .L28
cvttsd2siq %xmm0, %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
cmpnlesd %xmm1, %xmm3
movsd .LC4(%rip), %xmm4
andpd %xmm4, %xmm3
addsd %xmm1, %xmm3
andnpd %xmm0, %xmm2
orpd %xmm2, %xmm3
.L28:
cvttsd2sil %xmm3, %eax
movl %eax, -80(%rbp)
movl $1, -76(%rbp)
movl $1, -72(%rbp)
movl -60(%rbp), %ecx
movl $0, %r9d
movl $0, %r8d
movq -68(%rbp), %rdx
movq -80(%rbp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L42
.L29:
movl $1024, -68(%rbp)
movl $1, -64(%rbp)
movl $1, -60(%rbp)
pxor %xmm0, %xmm0
cvtsi2sdl -124(%rbp), %xmm0
mulsd .LC1(%rip), %xmm0
movapd %xmm0, %xmm3
movsd .LC7(%rip), %xmm2
movapd %xmm0, %xmm1
andpd %xmm2, %xmm1
movsd .LC2(%rip), %xmm4
ucomisd %xmm1, %xmm4
jbe .L30
cvttsd2siq %xmm0, %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
cmpnlesd %xmm1, %xmm3
movsd .LC4(%rip), %xmm4
andpd %xmm4, %xmm3
addsd %xmm1, %xmm3
andnpd %xmm0, %xmm2
orpd %xmm2, %xmm3
.L30:
cvttsd2sil %xmm3, %eax
movl %eax, -80(%rbp)
movl $1, -76(%rbp)
movl $1, -72(%rbp)
movl -60(%rbp), %ecx
movl $0, %r9d
movl $0, %r8d
movq -68(%rbp), %rdx
movq -80(%rbp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L43
.L31:
movl $1024, -68(%rbp)
movl $1, -64(%rbp)
movl $1, -60(%rbp)
movl $1, -80(%rbp)
movl $1, -76(%rbp)
movl $1, -72(%rbp)
movl $0, %r9d
movl $0, %r8d
movq -68(%rbp), %rdx
movl $1, %ecx
movq -80(%rbp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L44
.L32:
leaq -68(%rbp), %rdi
movl $2, %ecx
movl $4, %edx
movq -88(%rbp), %rsi
call cudaMemcpy@PLT
movl -124(%rbp), %esi
testl %esi, %esi
jle .L38
movslq %esi, %rsi
salq $2, %rsi
movl $0, %eax
movl $0, %ecx
.L34:
movl (%r12,%rax), %edx
imull 0(%r13,%rax), %edx
addl %edx, %ecx
addq $4, %rax
cmpq %rax, %rsi
jne .L34
.L33:
cmpl %ecx, -68(%rbp)
je .L45
leaq .LC6(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
.L36:
movq -120(%rbp), %rdi
call cudaFree@PLT
movq -112(%rbp), %rdi
call cudaFree@PLT
movq -104(%rbp), %rdi
call cudaFree@PLT
movq -96(%rbp), %rdi
call cudaFree@PLT
movq -88(%rbp), %rdi
call cudaFree@PLT
movq -56(%rbp), %rax
subq %fs:40, %rax
jne .L46
movl $0, %eax
leaq -40(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
.cfi_remember_state
.cfi_def_cfa 7, 8
ret
.L42:
.cfi_restore_state
movl -124(%rbp), %ecx
movq -104(%rbp), %rdx
movq -112(%rbp), %rsi
movq -120(%rbp), %rdi
call _Z27__device_stub__Z3DotPiS_S_iPiS_S_i
jmp .L29
.L43:
movl -124(%rbp), %edx
movq -96(%rbp), %rsi
movq -104(%rbp), %rdi
call _Z25__device_stub__Z3AddPiS_iPiS_i
jmp .L31
.L44:
pxor %xmm0, %xmm0
cvtsi2sdl %r15d, %xmm0
mulsd .LC1(%rip), %xmm0
call ceil@PLT
cvttsd2sil %xmm0, %edx
movq -88(%rbp), %rsi
movq -96(%rbp), %rdi
call _Z25__device_stub__Z3AddPiS_iPiS_i
jmp .L32
.L38:
movl $0, %ecx
jmp .L33
.L45:
leaq .LC5(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
jmp .L36
.L46:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1
.LC8:
.string "_Z3AddPiS_i"
.LC9:
.string "_Z3DotPiS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3699:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z3AddPiS_i(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq _Z3DotPiS_S_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3699:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC1:
.long 0
.long 1062207488
.align 8
.LC2:
.long 0
.long 1127219200
.align 8
.LC4:
.long 0
.long 1072693248
.align 8
.LC7:
.long -1
.long 2147483647
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
using namespace std;
__global__ void Dot(int* d_a, int* d_b, int* d_c, int size)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id<size)
d_c[id]=d_a[id]*d_b[id];
}
__global__ void Add(int* d_c, int* d_out, int size)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
int t_id = threadIdx.x;
int b_id = blockIdx.x;
__shared__ int a[1024];
if(id < size)
a[t_id] = d_c[id];
__syncthreads();
for(int s = 512; s>0; s = s/2)
{
__syncthreads();
if(id>=size || id+s>=size)
continue;
if(t_id<s)
{
a[t_id]+=a[t_id + s];
}
}
__syncthreads();
if(t_id==0)
d_out[b_id] = a[t_id];
}
int main()
{
int size;
cout<<"Enter size : ";
cin>>size;
int h_a[size], h_b[size], h_ans;
int bytes=size*sizeof(int);
int length=(int)ceil(1.0*size/1024);
for(int i=0;i<size;i++)
{
h_a[i]=rand()%10;
h_b[i]=rand()%10;
}
int *d_a, *d_b, *d_c, *d_out, *d_ans;
cudaMalloc((void**)&d_a, bytes);
cudaMalloc((void**)&d_b, bytes);
cudaMalloc((void**)&d_c, bytes);
cudaMalloc((void**)&d_out, bytes);
cudaMalloc((void**)&d_ans, sizeof(int));
cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
Dot<<<((int)ceil(1.0*size/1024)), 1024>>>(d_a, d_b, d_c, size);
Add<<<((int)ceil(1.0*size/1024)), 1024>>>(d_c, d_out, size);
Add<<<1, 1024>>>(d_out, d_ans, length);
cudaMemcpy(&h_ans, d_ans, sizeof(int), cudaMemcpyDeviceToHost);
int res=0;
for(int i=0;i<size;i++)
{
res+=(h_a[i]*h_b[i]);
}
if(h_ans==res)
cout<<"Correct result";
else
cout<<"Invalid";
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaFree(d_out);
cudaFree(d_ans);
} | #include <hip/hip_runtime.h>
#include <iostream>
using namespace std;
__global__ void Dot(int* d_a, int* d_b, int* d_c, int size)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id<size)
d_c[id]=d_a[id]*d_b[id];
}
__global__ void Add(int* d_c, int* d_out, int size)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
int t_id = threadIdx.x;
int b_id = blockIdx.x;
__shared__ int a[1024];
if(id < size)
a[t_id] = d_c[id];
__syncthreads();
for(int s = 512; s>0; s = s/2)
{
__syncthreads();
if(id>=size || id+s>=size)
continue;
if(t_id<s)
{
a[t_id]+=a[t_id + s];
}
}
__syncthreads();
if(t_id==0)
d_out[b_id] = a[t_id];
}
int main()
{
int size;
cout<<"Enter size : ";
cin>>size;
int h_a[size], h_b[size], h_ans;
int bytes=size*sizeof(int);
int length=(int)ceil(1.0*size/1024);
for(int i=0;i<size;i++)
{
h_a[i]=rand()%10;
h_b[i]=rand()%10;
}
int *d_a, *d_b, *d_c, *d_out, *d_ans;
hipMalloc((void**)&d_a, bytes);
hipMalloc((void**)&d_b, bytes);
hipMalloc((void**)&d_c, bytes);
hipMalloc((void**)&d_out, bytes);
hipMalloc((void**)&d_ans, sizeof(int));
hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice);
Dot<<<((int)ceil(1.0*size/1024)), 1024>>>(d_a, d_b, d_c, size);
Add<<<((int)ceil(1.0*size/1024)), 1024>>>(d_c, d_out, size);
Add<<<1, 1024>>>(d_out, d_ans, length);
hipMemcpy(&h_ans, d_ans, sizeof(int), hipMemcpyDeviceToHost);
int res=0;
for(int i=0;i<size;i++)
{
res+=(h_a[i]*h_b[i]);
}
if(h_ans==res)
cout<<"Correct result";
else
cout<<"Invalid";
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
hipFree(d_out);
hipFree(d_ans);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <iostream>
using namespace std;
__global__ void Dot(int* d_a, int* d_b, int* d_c, int size)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id<size)
d_c[id]=d_a[id]*d_b[id];
}
__global__ void Add(int* d_c, int* d_out, int size)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
int t_id = threadIdx.x;
int b_id = blockIdx.x;
__shared__ int a[1024];
if(id < size)
a[t_id] = d_c[id];
__syncthreads();
for(int s = 512; s>0; s = s/2)
{
__syncthreads();
if(id>=size || id+s>=size)
continue;
if(t_id<s)
{
a[t_id]+=a[t_id + s];
}
}
__syncthreads();
if(t_id==0)
d_out[b_id] = a[t_id];
}
int main()
{
int size;
cout<<"Enter size : ";
cin>>size;
int h_a[size], h_b[size], h_ans;
int bytes=size*sizeof(int);
int length=(int)ceil(1.0*size/1024);
for(int i=0;i<size;i++)
{
h_a[i]=rand()%10;
h_b[i]=rand()%10;
}
int *d_a, *d_b, *d_c, *d_out, *d_ans;
hipMalloc((void**)&d_a, bytes);
hipMalloc((void**)&d_b, bytes);
hipMalloc((void**)&d_c, bytes);
hipMalloc((void**)&d_out, bytes);
hipMalloc((void**)&d_ans, sizeof(int));
hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice);
Dot<<<((int)ceil(1.0*size/1024)), 1024>>>(d_a, d_b, d_c, size);
Add<<<((int)ceil(1.0*size/1024)), 1024>>>(d_c, d_out, size);
Add<<<1, 1024>>>(d_out, d_ans, length);
hipMemcpy(&h_ans, d_ans, sizeof(int), hipMemcpyDeviceToHost);
int res=0;
for(int i=0;i<size;i++)
{
res+=(h_a[i]*h_b[i]);
}
if(h_ans==res)
cout<<"Correct result";
else
cout<<"Invalid";
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
hipFree(d_out);
hipFree(d_ans);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3DotPiS_S_i
.globl _Z3DotPiS_S_i
.p2align 8
.type _Z3DotPiS_S_i,@function
_Z3DotPiS_S_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_mul_lo_u32 v2, v3, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3DotPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3DotPiS_S_i, .Lfunc_end0-_Z3DotPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z3AddPiS_i
.globl _Z3AddPiS_i
.p2align 8
.type _Z3AddPiS_i,@function
_Z3AddPiS_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s5, s[0:1], 0x10
s_mov_b32 s4, s15
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s4, s2, v[0:1]
v_cmp_gt_i32_e32 vcc_lo, s5, v1
s_and_saveexec_b32 s3, vcc_lo
s_cbranch_execz .LBB1_2
s_load_b64 s[6:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, s2, s6, v2
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v3, s2, s7, v3, s2
global_load_b32 v2, v[2:3], off
v_lshlrev_b32_e32 v3, 2, v0
s_waitcnt vmcnt(0)
ds_store_b32 v3, v2
.LBB1_2:
s_or_b32 exec_lo, exec_lo, s3
v_lshlrev_b32_e32 v2, 2, v0
s_movk_i32 s6, 0x200
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_set_inst_prefetch_distance 0x1
s_branch .LBB1_4
.p2align 6
.LBB1_3:
s_or_b32 exec_lo, exec_lo, s2
s_lshr_b32 s2, s6, 1
s_cmp_lt_u32 s6, 2
s_mov_b32 s6, s2
s_cbranch_scc1 .LBB1_6
.LBB1_4:
v_add_nc_u32_e32 v3, s6, v1
v_cmp_gt_u32_e64 s3, s6, v0
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_cmp_gt_i32_e64 s2, s5, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, vcc_lo, s2
s_and_b32 s3, s3, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s2, s3
s_cbranch_execz .LBB1_3
v_add_lshl_u32 v3, s6, v0, 2
ds_load_b32 v3, v3
ds_load_b32 v4, v2
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v3, v4, v3
ds_store_b32 v2, v3
s_branch .LBB1_3
.LBB1_6:
s_set_inst_prefetch_distance 0x2
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_mov_b32 s2, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB1_8
v_mov_b32_e32 v0, 0
s_load_b64 s[0:1], s[0:1], 0x8
s_ashr_i32 s5, s4, 31
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[2:3], s[4:5], 2
ds_load_b32 v1, v0
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v0, v1, s[0:1]
.LBB1_8:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3AddPiS_i
.amdhsa_group_segment_fixed_size 4096
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z3AddPiS_i, .Lfunc_end1-_Z3AddPiS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3DotPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3DotPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 4096
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3AddPiS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3AddPiS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <iostream>
using namespace std;
__global__ void Dot(int* d_a, int* d_b, int* d_c, int size)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id<size)
d_c[id]=d_a[id]*d_b[id];
}
__global__ void Add(int* d_c, int* d_out, int size)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
int t_id = threadIdx.x;
int b_id = blockIdx.x;
__shared__ int a[1024];
if(id < size)
a[t_id] = d_c[id];
__syncthreads();
for(int s = 512; s>0; s = s/2)
{
__syncthreads();
if(id>=size || id+s>=size)
continue;
if(t_id<s)
{
a[t_id]+=a[t_id + s];
}
}
__syncthreads();
if(t_id==0)
d_out[b_id] = a[t_id];
}
int main()
{
int size;
cout<<"Enter size : ";
cin>>size;
int h_a[size], h_b[size], h_ans;
int bytes=size*sizeof(int);
int length=(int)ceil(1.0*size/1024);
for(int i=0;i<size;i++)
{
h_a[i]=rand()%10;
h_b[i]=rand()%10;
}
int *d_a, *d_b, *d_c, *d_out, *d_ans;
hipMalloc((void**)&d_a, bytes);
hipMalloc((void**)&d_b, bytes);
hipMalloc((void**)&d_c, bytes);
hipMalloc((void**)&d_out, bytes);
hipMalloc((void**)&d_ans, sizeof(int));
hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice);
Dot<<<((int)ceil(1.0*size/1024)), 1024>>>(d_a, d_b, d_c, size);
Add<<<((int)ceil(1.0*size/1024)), 1024>>>(d_c, d_out, size);
Add<<<1, 1024>>>(d_out, d_ans, length);
hipMemcpy(&h_ans, d_ans, sizeof(int), hipMemcpyDeviceToHost);
int res=0;
for(int i=0;i<size;i++)
{
res+=(h_a[i]*h_b[i]);
}
if(h_ans==res)
cout<<"Correct result";
else
cout<<"Invalid";
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
hipFree(d_out);
hipFree(d_ans);
} | .text
.file "Resultant_Sum_of_dot_product.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z18__device_stub__DotPiS_S_i # -- Begin function _Z18__device_stub__DotPiS_S_i
.p2align 4, 0x90
.type _Z18__device_stub__DotPiS_S_i,@function
_Z18__device_stub__DotPiS_S_i: # @_Z18__device_stub__DotPiS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3DotPiS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z18__device_stub__DotPiS_S_i, .Lfunc_end0-_Z18__device_stub__DotPiS_S_i
.cfi_endproc
# -- End function
.globl _Z18__device_stub__AddPiS_i # -- Begin function _Z18__device_stub__AddPiS_i
.p2align 4, 0x90
.type _Z18__device_stub__AddPiS_i,@function
_Z18__device_stub__AddPiS_i: # @_Z18__device_stub__AddPiS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3AddPiS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z18__device_stub__AddPiS_i, .Lfunc_end1-_Z18__device_stub__AddPiS_i
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI2_0:
.quad 0x3f50000000000000 # double 9.765625E-4
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $184, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $13, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
leaq -44(%rbp), %rsi
movl $_ZSt3cin, %edi
callq _ZNSirsERi
movq %rsp, %r13
movl -44(%rbp), %eax
movq %rsp, %rbx
leaq 15(,%rax,4), %rax
andq $-16, %rax
subq %rax, %rbx
movq %rbx, %rsp
movl -44(%rbp), %eax
movq %rsp, %r14
leaq 15(,%rax,4), %rcx
andq $-16, %rcx
subq %rcx, %r14
movq %r14, %rsp
cvtsi2sd %eax, %xmm0
leal (,%rax,4), %r12d
mulsd .LCPI2_0(%rip), %xmm0
callq ceil@PLT
movsd %xmm0, -216(%rbp) # 8-byte Spill
cmpl $0, -44(%rbp)
jle .LBB2_3
# %bb.1: # %.lr.ph.preheader
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB2_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $34, %rcx
addl %edx, %ecx
addl %ecx, %ecx
leal (%rcx,%rcx,4), %ecx
subl %ecx, %eax
movl %eax, (%rbx,%r15,4)
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $34, %rcx
addl %edx, %ecx
addl %ecx, %ecx
leal (%rcx,%rcx,4), %ecx
subl %ecx, %eax
movl %eax, (%r14,%r15,4)
incq %r15
movslq -44(%rbp), %rax
cmpq %rax, %r15
jl .LBB2_2
.LBB2_3: # %._crit_edge
movabsq $4294968320, %r15 # imm = 0x100000400
movslq %r12d, %r12
leaq -160(%rbp), %rdi
movq %r12, %rsi
callq hipMalloc
leaq -152(%rbp), %rdi
movq %r12, %rsi
callq hipMalloc
leaq -144(%rbp), %rdi
movq %r12, %rsi
callq hipMalloc
leaq -136(%rbp), %rdi
movq %r12, %rsi
callq hipMalloc
leaq -128(%rbp), %rdi
movl $4, %esi
callq hipMalloc
movq -160(%rbp), %rdi
movq %rbx, %rsi
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
movq -152(%rbp), %rdi
movq %r14, %rsi
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
xorps %xmm0, %xmm0
cvtsi2sdl -44(%rbp), %xmm0
mulsd .LCPI2_0(%rip), %xmm0
callq ceil@PLT
cvttsd2si %xmm0, %eax
leaq (%rax,%r15), %rdi
addq $-1024, %rdi # imm = 0xFC00
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_5
# %bb.4:
movq -160(%rbp), %rax
movq -152(%rbp), %rcx
movq -144(%rbp), %rdx
movl -44(%rbp), %esi
movq %rax, -120(%rbp)
movq %rcx, -112(%rbp)
movq %rdx, -72(%rbp)
movl %esi, -164(%rbp)
leaq -120(%rbp), %rax
movq %rax, -208(%rbp)
leaq -112(%rbp), %rax
movq %rax, -200(%rbp)
leaq -72(%rbp), %rax
movq %rax, -192(%rbp)
leaq -164(%rbp), %rax
movq %rax, -184(%rbp)
leaq -104(%rbp), %rdi
leaq -88(%rbp), %rsi
leaq -64(%rbp), %rdx
leaq -56(%rbp), %rcx
callq __hipPopCallConfiguration
movq -104(%rbp), %rsi
movl -96(%rbp), %edx
movq -88(%rbp), %rcx
movl -80(%rbp), %r8d
leaq -208(%rbp), %r9
movl $_Z3DotPiS_S_i, %edi
pushq -56(%rbp)
pushq -64(%rbp)
callq hipLaunchKernel
addq $16, %rsp
.LBB2_5:
xorps %xmm0, %xmm0
cvtsi2sdl -44(%rbp), %xmm0
mulsd .LCPI2_0(%rip), %xmm0
callq ceil@PLT
cvttsd2si %xmm0, %eax
leaq (%r15,%rax), %rdi
addq $-1024, %rdi # imm = 0xFC00
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_7
# %bb.6:
movq -144(%rbp), %rax
movq -136(%rbp), %rcx
movl -44(%rbp), %edx
movq %rax, -120(%rbp)
movq %rcx, -112(%rbp)
movl %edx, -56(%rbp)
leaq -120(%rbp), %rax
movq %rax, -208(%rbp)
leaq -112(%rbp), %rax
movq %rax, -200(%rbp)
leaq -56(%rbp), %rax
movq %rax, -192(%rbp)
leaq -104(%rbp), %rdi
leaq -88(%rbp), %rsi
leaq -72(%rbp), %rdx
leaq -64(%rbp), %rcx
callq __hipPopCallConfiguration
movq -104(%rbp), %rsi
movl -96(%rbp), %edx
movq -88(%rbp), %rcx
movl -80(%rbp), %r8d
leaq -208(%rbp), %r9
movl $_Z3AddPiS_i, %edi
pushq -64(%rbp)
pushq -72(%rbp)
callq hipLaunchKernel
addq $16, %rsp
.LBB2_7:
leaq -1023(%r15), %rdi
xorl %r12d, %r12d
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_9
# %bb.8:
cvttsd2si -216(%rbp), %eax # 8-byte Folded Reload
movq -136(%rbp), %rcx
movq -128(%rbp), %rdx
movq %rcx, -120(%rbp)
movq %rdx, -112(%rbp)
movl %eax, -56(%rbp)
leaq -120(%rbp), %rax
movq %rax, -208(%rbp)
leaq -112(%rbp), %rax
movq %rax, -200(%rbp)
leaq -56(%rbp), %rax
movq %rax, -192(%rbp)
leaq -104(%rbp), %rdi
leaq -88(%rbp), %rsi
leaq -72(%rbp), %rdx
leaq -64(%rbp), %rcx
callq __hipPopCallConfiguration
movq -104(%rbp), %rsi
movl -96(%rbp), %edx
movq -88(%rbp), %rcx
movl -80(%rbp), %r8d
leaq -208(%rbp), %r9
movl $_Z3AddPiS_i, %edi
pushq -64(%rbp)
pushq -72(%rbp)
callq hipLaunchKernel
addq $16, %rsp
.LBB2_9:
movq -128(%rbp), %rsi
leaq -208(%rbp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
movl -44(%rbp), %eax
testl %eax, %eax
jle .LBB2_12
# %bb.10: # %.lr.ph63.preheader
xorl %ecx, %ecx
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB2_11: # %.lr.ph63
# =>This Inner Loop Header: Depth=1
movl (%r14,%rcx,4), %edx
imull (%rbx,%rcx,4), %edx
addl %edx, %r12d
incq %rcx
cmpq %rcx, %rax
jne .LBB2_11
.LBB2_12: # %._crit_edge64
cmpl %r12d, -208(%rbp)
jne .LBB2_14
# %bb.13:
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $14, %edx
jmp .LBB2_15
.LBB2_14:
movl $_ZSt4cout, %edi
movl $.L.str.2, %esi
movl $7, %edx
.LBB2_15:
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq -160(%rbp), %rdi
callq hipFree
movq -152(%rbp), %rdi
callq hipFree
movq -144(%rbp), %rdi
callq hipFree
movq -136(%rbp), %rdi
callq hipFree
movq -128(%rbp), %rdi
callq hipFree
movq %r13, %rsp
xorl %eax, %eax
leaq -40(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
.cfi_def_cfa %rsp, 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3DotPiS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3AddPiS_i, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3DotPiS_S_i,@object # @_Z3DotPiS_S_i
.section .rodata,"a",@progbits
.globl _Z3DotPiS_S_i
.p2align 3, 0x0
_Z3DotPiS_S_i:
.quad _Z18__device_stub__DotPiS_S_i
.size _Z3DotPiS_S_i, 8
.type _Z3AddPiS_i,@object # @_Z3AddPiS_i
.globl _Z3AddPiS_i
.p2align 3, 0x0
_Z3AddPiS_i:
.quad _Z18__device_stub__AddPiS_i
.size _Z3AddPiS_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Enter size : "
.size .L.str, 14
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Correct result"
.size .L.str.1, 15
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Invalid"
.size .L.str.2, 8
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3DotPiS_S_i"
.size .L__unnamed_1, 14
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z3AddPiS_i"
.size .L__unnamed_2, 12
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__DotPiS_S_i
.addrsig_sym _Z18__device_stub__AddPiS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3DotPiS_S_i
.addrsig_sym _Z3AddPiS_i
.addrsig_sym _ZSt4cout
.addrsig_sym _ZSt3cin
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z3AddPiS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0030*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0040*/ IMAD R2, R0, c[0x0][0x0], R3 ; /* 0x0000000000027a24 */
/* 0x001fca00078e0203 */
/*0050*/ ISETP.GE.AND P2, PT, R2, c[0x0][0x170], PT ; /* 0x00005c0002007a0c */
/* 0x000fda0003f46270 */
/*0060*/ @!P2 IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff05a424 */
/* 0x000fc800078e00ff */
/*0070*/ @!P2 IMAD.WIDE R4, R2, R5, c[0x0][0x160] ; /* 0x000058000204a625 */
/* 0x000fcc00078e0205 */
/*0080*/ @!P2 LDG.E R4, [R4.64] ; /* 0x000000040404a981 */
/* 0x000ea2000c1e1900 */
/*0090*/ ISETP.GE.AND P0, PT, R2.reuse, c[0x0][0x170], PT ; /* 0x00005c0002007a0c */
/* 0x040fe40003f06270 */
/*00a0*/ IADD3 R6, R2.reuse, 0x200, RZ ; /* 0x0000020002067810 */
/* 0x040fe40007ffe0ff */
/*00b0*/ IADD3 R8, R2, 0x100, RZ ; /* 0x0000010002087810 */
/* 0x000fe40007ffe0ff */
/*00c0*/ ISETP.GE.OR P1, PT, R6, c[0x0][0x170], P0 ; /* 0x00005c0006007a0c */
/* 0x000fc80000726670 */
/*00d0*/ ISETP.GT.OR P1, PT, R3, 0x1ff, P1 ; /* 0x000001ff0300780c */
/* 0x000fe20000f24670 */
/*00e0*/ @!P2 STS [R3.X4], R4 ; /* 0x000000040300a388 */
/* 0x004fe80000004800 */
/*00f0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0100*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0110*/ ISETP.GE.OR P2, PT, R8, c[0x0][0x170], P0 ; /* 0x00005c0008007a0c */
/* 0x000fc80000746670 */
/*0120*/ ISETP.GT.OR P2, PT, R3, 0xff, P2 ; /* 0x000000ff0300780c */
/* 0x000fe20001744670 */
/*0130*/ @!P1 LDS R6, [R3.X4] ; /* 0x0000000003069984 */
/* 0x000fe80000004800 */
/*0140*/ @!P1 LDS R7, [R3.X4+0x800] ; /* 0x0008000003079984 */
/* 0x000e240000004800 */
/*0150*/ @!P1 IMAD.IADD R6, R6, 0x1, R7 ; /* 0x0000000106069824 */
/* 0x001fe200078e0207 */
/*0160*/ IADD3 R7, R2, 0x80, RZ ; /* 0x0000008002077810 */
/* 0x000fc80007ffe0ff */
/*0170*/ @!P1 STS [R3.X4], R6 ; /* 0x0000000603009388 */
/* 0x000fe80000004800 */
/*0180*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0190*/ ISETP.GE.OR P1, PT, R7, c[0x0][0x170], P0 ; /* 0x00005c0007007a0c */
/* 0x000fe40000726670 */
/*01a0*/ IADD3 R7, R2, 0x40, RZ ; /* 0x0000004002077810 */
/* 0x000fe40007ffe0ff */
/*01b0*/ ISETP.GT.OR P1, PT, R3, 0x7f, P1 ; /* 0x0000007f0300780c */
/* 0x000fe20000f24670 */
/*01c0*/ @!P2 LDS R4, [R3.X4] ; /* 0x000000000304a984 */
/* 0x000fe80000004800 */
/*01d0*/ @!P2 LDS R5, [R3.X4+0x400] ; /* 0x000400000305a984 */
/* 0x000e240000004800 */
/*01e0*/ @!P2 IMAD.IADD R4, R4, 0x1, R5 ; /* 0x000000010404a824 */
/* 0x001fca00078e0205 */
/*01f0*/ @!P2 STS [R3.X4], R4 ; /* 0x000000040300a388 */
/* 0x000fe80000004800 */
/*0200*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0210*/ ISETP.GE.OR P2, PT, R7, c[0x0][0x170], P0 ; /* 0x00005c0007007a0c */
/* 0x000fe40000746670 */
/*0220*/ IADD3 R7, R2, 0x20, RZ ; /* 0x0000002002077810 */
/* 0x000fe40007ffe0ff */
/*0230*/ ISETP.GT.OR P2, PT, R3, 0x3f, P2 ; /* 0x0000003f0300780c */
/* 0x000fe20001744670 */
/*0240*/ @!P1 LDS R5, [R3.X4] ; /* 0x0000000003059984 */
/* 0x000fe80000004800 */
/*0250*/ @!P1 LDS R6, [R3.X4+0x200] ; /* 0x0002000003069984 */
/* 0x000e240000004800 */
/*0260*/ @!P1 IMAD.IADD R6, R5, 0x1, R6 ; /* 0x0000000105069824 */
/* 0x001fca00078e0206 */
/*0270*/ @!P1 STS [R3.X4], R6 ; /* 0x0000000603009388 */
/* 0x000fe80000004800 */
/*0280*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0290*/ ISETP.GE.OR P1, PT, R7, c[0x0][0x170], P0 ; /* 0x00005c0007007a0c */
/* 0x000fe40000726670 */
/*02a0*/ IADD3 R7, R2, 0x10, RZ ; /* 0x0000001002077810 */
/* 0x000fe40007ffe0ff */
/*02b0*/ ISETP.GT.OR P1, PT, R3, 0x1f, P1 ; /* 0x0000001f0300780c */
/* 0x000fe20000f24670 */
/*02c0*/ @!P2 LDS R5, [R3.X4] ; /* 0x000000000305a984 */
/* 0x000fe80000004800 */
/*02d0*/ @!P2 LDS R4, [R3.X4+0x100] ; /* 0x000100000304a984 */
/* 0x000e240000004800 */
/*02e0*/ @!P2 IMAD.IADD R4, R5, 0x1, R4 ; /* 0x000000010504a824 */
/* 0x001fca00078e0204 */
/*02f0*/ @!P2 STS [R3.X4], R4 ; /* 0x000000040300a388 */
/* 0x000fe80000004800 */
/*0300*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0310*/ ISETP.GE.OR P2, PT, R7, c[0x0][0x170], P0 ; /* 0x00005c0007007a0c */
/* 0x000fe40000746670 */
/*0320*/ IADD3 R7, R2, 0x8, RZ ; /* 0x0000000802077810 */
/* 0x000fe40007ffe0ff */
/*0330*/ ISETP.GT.OR P2, PT, R3, 0xf, P2 ; /* 0x0000000f0300780c */
/* 0x000fe20001744670 */
/*0340*/ @!P1 LDS R5, [R3.X4] ; /* 0x0000000003059984 */
/* 0x000fe80000004800 */
/*0350*/ @!P1 LDS R6, [R3.X4+0x80] ; /* 0x0000800003069984 */
/* 0x000e240000004800 */
/*0360*/ @!P1 IMAD.IADD R6, R5, 0x1, R6 ; /* 0x0000000105069824 */
/* 0x001fca00078e0206 */
/*0370*/ @!P1 STS [R3.X4], R6 ; /* 0x0000000603009388 */
/* 0x000fe80000004800 */
/*0380*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0390*/ ISETP.GE.OR P1, PT, R7, c[0x0][0x170], P0 ; /* 0x00005c0007007a0c */
/* 0x000fe40000726670 */
/*03a0*/ IADD3 R7, R2, 0x4, RZ ; /* 0x0000000402077810 */
/* 0x000fe40007ffe0ff */
/*03b0*/ ISETP.GT.OR P1, PT, R3, 0x7, P1 ; /* 0x000000070300780c */
/* 0x000fe20000f24670 */
/*03c0*/ @!P2 LDS R5, [R3.X4] ; /* 0x000000000305a984 */
/* 0x000fe80000004800 */
/*03d0*/ @!P2 LDS R4, [R3.X4+0x40] ; /* 0x000040000304a984 */
/* 0x000e240000004800 */
/*03e0*/ @!P2 IMAD.IADD R4, R5, 0x1, R4 ; /* 0x000000010504a824 */
/* 0x001fca00078e0204 */
/*03f0*/ @!P2 STS [R3.X4], R4 ; /* 0x000000040300a388 */
/* 0x000fe80000004800 */
/*0400*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0410*/ ISETP.GE.OR P2, PT, R7, c[0x0][0x170], P0 ; /* 0x00005c0007007a0c */
/* 0x000fe40000746670 */
/*0420*/ IADD3 R7, R2.reuse, 0x2, RZ ; /* 0x0000000202077810 */
/* 0x040fe40007ffe0ff */
/*0430*/ ISETP.GT.OR P2, PT, R3, 0x3, P2 ; /* 0x000000030300780c */
/* 0x000fe40001744670 */
/*0440*/ IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102027810 */
/* 0x000fe20007ffe0ff */
/*0450*/ @!P1 LDS R5, [R3.X4] ; /* 0x0000000003059984 */
/* 0x000fe80000004800 */
/*0460*/ @!P1 LDS R6, [R3.X4+0x20] ; /* 0x0000200003069984 */
/* 0x000e240000004800 */
/*0470*/ @!P1 IMAD.IADD R6, R5, 0x1, R6 ; /* 0x0000000105069824 */
/* 0x001fca00078e0206 */
/*0480*/ @!P1 STS [R3.X4], R6 ; /* 0x0000000603009388 */
/* 0x000fe80000004800 */
/*0490*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*04a0*/ ISETP.GE.OR P1, PT, R7, c[0x0][0x170], P0 ; /* 0x00005c0007007a0c */
/* 0x000fe40000726670 */
/*04b0*/ ISETP.GE.OR P0, PT, R2, c[0x0][0x170], P0 ; /* 0x00005c0002007a0c */
/* 0x000fe40000706670 */
/*04c0*/ ISETP.GT.OR P1, PT, R3.reuse, 0x1, P1 ; /* 0x000000010300780c */
/* 0x040fe40000f24670 */
/*04d0*/ ISETP.GT.OR P0, PT, R3, RZ, P0 ; /* 0x000000ff0300720c */
/* 0x000fe20000704670 */
/*04e0*/ @!P2 LDS R5, [R3.X4] ; /* 0x000000000305a984 */
/* 0x000fe80000004800 */
/*04f0*/ @!P2 LDS R4, [R3.X4+0x10] ; /* 0x000010000304a984 */
/* 0x000e240000004800 */
/*0500*/ @!P2 IMAD.IADD R4, R5, 0x1, R4 ; /* 0x000000010504a824 */
/* 0x001fca00078e0204 */
/*0510*/ @!P2 STS [R3.X4], R4 ; /* 0x000000040300a388 */
/* 0x000fe80000004800 */
/*0520*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0530*/ @!P1 LDS R5, [R3.X4] ; /* 0x0000000003059984 */
/* 0x000fe80000004800 */
/*0540*/ @!P1 LDS R6, [R3.X4+0x8] ; /* 0x0000080003069984 */
/* 0x000e240000004800 */
/*0550*/ @!P1 IMAD.IADD R6, R5, 0x1, R6 ; /* 0x0000000105069824 */
/* 0x001fca00078e0206 */
/*0560*/ @!P1 STS [R3.X4], R6 ; /* 0x0000000603009388 */
/* 0x000fe80000004800 */
/*0570*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0580*/ ISETP.NE.AND P1, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fca0003f25270 */
/*0590*/ @!P0 LDS R2, [R3.X4] ; /* 0x0000000003028984 */
/* 0x000fe80000004800 */
/*05a0*/ @!P0 LDS R5, [R3.X4+0x4] ; /* 0x0000040003058984 */
/* 0x000e240000004800 */
/*05b0*/ @!P0 IMAD.IADD R2, R2, 0x1, R5 ; /* 0x0000000102028824 */
/* 0x001fca00078e0205 */
/*05c0*/ @!P0 STS [R3.X4], R2 ; /* 0x0000000203008388 */
/* 0x0001e80000004800 */
/*05d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*05e0*/ @P1 EXIT ; /* 0x000000000000194d */
/* 0x000fea0003800000 */
/*05f0*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */
/* 0x001e220000000800 */
/*0600*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fc800078e00ff */
/*0610*/ IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fca00078e0203 */
/*0620*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101904 */
/*0630*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0640*/ BRA 0x640; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0650*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0660*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0670*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0680*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0690*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z3DotPiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0207 */
/*00d0*/ IMAD R9, R4, R3, RZ ; /* 0x0000000304097224 */
/* 0x004fca00078e02ff */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3DotPiS_S_i
.globl _Z3DotPiS_S_i
.p2align 8
.type _Z3DotPiS_S_i,@function
_Z3DotPiS_S_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_mul_lo_u32 v2, v3, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3DotPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3DotPiS_S_i, .Lfunc_end0-_Z3DotPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z3AddPiS_i
.globl _Z3AddPiS_i
.p2align 8
.type _Z3AddPiS_i,@function
_Z3AddPiS_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s5, s[0:1], 0x10
s_mov_b32 s4, s15
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s4, s2, v[0:1]
v_cmp_gt_i32_e32 vcc_lo, s5, v1
s_and_saveexec_b32 s3, vcc_lo
s_cbranch_execz .LBB1_2
s_load_b64 s[6:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, s2, s6, v2
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v3, s2, s7, v3, s2
global_load_b32 v2, v[2:3], off
v_lshlrev_b32_e32 v3, 2, v0
s_waitcnt vmcnt(0)
ds_store_b32 v3, v2
.LBB1_2:
s_or_b32 exec_lo, exec_lo, s3
v_lshlrev_b32_e32 v2, 2, v0
s_movk_i32 s6, 0x200
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_set_inst_prefetch_distance 0x1
s_branch .LBB1_4
.p2align 6
.LBB1_3:
s_or_b32 exec_lo, exec_lo, s2
s_lshr_b32 s2, s6, 1
s_cmp_lt_u32 s6, 2
s_mov_b32 s6, s2
s_cbranch_scc1 .LBB1_6
.LBB1_4:
v_add_nc_u32_e32 v3, s6, v1
v_cmp_gt_u32_e64 s3, s6, v0
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_cmp_gt_i32_e64 s2, s5, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, vcc_lo, s2
s_and_b32 s3, s3, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s2, s3
s_cbranch_execz .LBB1_3
v_add_lshl_u32 v3, s6, v0, 2
ds_load_b32 v3, v3
ds_load_b32 v4, v2
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v3, v4, v3
ds_store_b32 v2, v3
s_branch .LBB1_3
.LBB1_6:
s_set_inst_prefetch_distance 0x2
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_mov_b32 s2, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB1_8
v_mov_b32_e32 v0, 0
s_load_b64 s[0:1], s[0:1], 0x8
s_ashr_i32 s5, s4, 31
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[2:3], s[4:5], 2
ds_load_b32 v1, v0
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v0, v1, s[0:1]
.LBB1_8:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3AddPiS_i
.amdhsa_group_segment_fixed_size 4096
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z3AddPiS_i, .Lfunc_end1-_Z3AddPiS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3DotPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3DotPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 4096
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3AddPiS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3AddPiS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001a1487_00000000-6_Resultant_Sum_of_dot_product.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z27__device_stub__Z3DotPiS_S_iPiS_S_i
.type _Z27__device_stub__Z3DotPiS_S_iPiS_S_i, @function
_Z27__device_stub__Z3DotPiS_S_iPiS_S_i:
.LFB3694:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3DotPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z27__device_stub__Z3DotPiS_S_iPiS_S_i, .-_Z27__device_stub__Z3DotPiS_S_iPiS_S_i
.globl _Z3DotPiS_S_i
.type _Z3DotPiS_S_i, @function
_Z3DotPiS_S_i:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z3DotPiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z3DotPiS_S_i, .-_Z3DotPiS_S_i
.globl _Z25__device_stub__Z3AddPiS_iPiS_i
.type _Z25__device_stub__Z3AddPiS_iPiS_i, @function
_Z25__device_stub__Z3AddPiS_iPiS_i:
.LFB3696:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3AddPiS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3696:
.size _Z25__device_stub__Z3AddPiS_iPiS_i, .-_Z25__device_stub__Z3AddPiS_iPiS_i
.globl _Z3AddPiS_i
.type _Z3AddPiS_i, @function
_Z3AddPiS_i:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z25__device_stub__Z3AddPiS_iPiS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _Z3AddPiS_i, .-_Z3AddPiS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Enter size : "
.LC5:
.string "Correct result"
.LC6:
.string "Invalid"
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
movq %rsp, %rbp
.cfi_def_cfa_register 6
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $88, %rsp
.cfi_offset 15, -24
.cfi_offset 14, -32
.cfi_offset 13, -40
.cfi_offset 12, -48
.cfi_offset 3, -56
movq %fs:40, %rax
movq %rax, -56(%rbp)
xorl %eax, %eax
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
leaq -124(%rbp), %rsi
leaq _ZSt3cin(%rip), %rdi
call _ZNSirsERi@PLT
movl -124(%rbp), %r15d
movslq %r15d, %rax
salq $2, %rax
leaq 15(%rax), %rdx
movq %rdx, %rsi
andq $-16, %rsi
andq $-4096, %rdx
movq %rsp, %rcx
subq %rdx, %rcx
.L20:
cmpq %rcx, %rsp
je .L21
subq $4096, %rsp
orq $0, 4088(%rsp)
jmp .L20
.L21:
movq %rsi, %rdx
andl $4095, %edx
subq %rdx, %rsp
testq %rdx, %rdx
je .L22
orq $0, -8(%rsp,%rdx)
.L22:
movq %rsp, %r12
addq $15, %rax
movq %rax, %rcx
andq $-16, %rcx
andq $-4096, %rax
movq %rsp, %rdx
subq %rax, %rdx
.L23:
cmpq %rdx, %rsp
je .L24
subq $4096, %rsp
orq $0, 4088(%rsp)
jmp .L23
.L24:
movq %rcx, %rax
andl $4095, %eax
subq %rax, %rsp
testq %rax, %rax
je .L25
orq $0, -8(%rsp,%rax)
.L25:
movq %rsp, %r13
leal 0(,%r15,4), %r14d
testl %r15d, %r15d
jle .L26
movl $0, %ebx
.L27:
call rand@PLT
movslq %eax, %rdx
imulq $1717986919, %rdx, %rdx
sarq $34, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,4), %edx
addl %edx, %edx
subl %edx, %eax
movl %eax, (%r12,%rbx,4)
call rand@PLT
movslq %eax, %rdx
imulq $1717986919, %rdx, %rdx
sarq $34, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,4), %edx
addl %edx, %edx
subl %edx, %eax
movl %eax, 0(%r13,%rbx,4)
addq $1, %rbx
cmpl %ebx, -124(%rbp)
jg .L27
.L26:
movslq %r14d, %r14
leaq -120(%rbp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
leaq -112(%rbp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
leaq -104(%rbp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
leaq -96(%rbp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
leaq -88(%rbp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movl $1, %ecx
movq %r14, %rdx
movq %r12, %rsi
movq -120(%rbp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %r14, %rdx
movq %r13, %rsi
movq -112(%rbp), %rdi
call cudaMemcpy@PLT
movl $1024, -68(%rbp)
movl $1, -64(%rbp)
movl $1, -60(%rbp)
pxor %xmm0, %xmm0
cvtsi2sdl -124(%rbp), %xmm0
mulsd .LC1(%rip), %xmm0
movapd %xmm0, %xmm3
movsd .LC7(%rip), %xmm2
movapd %xmm0, %xmm1
andpd %xmm2, %xmm1
movsd .LC2(%rip), %xmm4
ucomisd %xmm1, %xmm4
jbe .L28
cvttsd2siq %xmm0, %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
cmpnlesd %xmm1, %xmm3
movsd .LC4(%rip), %xmm4
andpd %xmm4, %xmm3
addsd %xmm1, %xmm3
andnpd %xmm0, %xmm2
orpd %xmm2, %xmm3
.L28:
cvttsd2sil %xmm3, %eax
movl %eax, -80(%rbp)
movl $1, -76(%rbp)
movl $1, -72(%rbp)
movl -60(%rbp), %ecx
movl $0, %r9d
movl $0, %r8d
movq -68(%rbp), %rdx
movq -80(%rbp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L42
.L29:
movl $1024, -68(%rbp)
movl $1, -64(%rbp)
movl $1, -60(%rbp)
pxor %xmm0, %xmm0
cvtsi2sdl -124(%rbp), %xmm0
mulsd .LC1(%rip), %xmm0
movapd %xmm0, %xmm3
movsd .LC7(%rip), %xmm2
movapd %xmm0, %xmm1
andpd %xmm2, %xmm1
movsd .LC2(%rip), %xmm4
ucomisd %xmm1, %xmm4
jbe .L30
cvttsd2siq %xmm0, %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
cmpnlesd %xmm1, %xmm3
movsd .LC4(%rip), %xmm4
andpd %xmm4, %xmm3
addsd %xmm1, %xmm3
andnpd %xmm0, %xmm2
orpd %xmm2, %xmm3
.L30:
cvttsd2sil %xmm3, %eax
movl %eax, -80(%rbp)
movl $1, -76(%rbp)
movl $1, -72(%rbp)
movl -60(%rbp), %ecx
movl $0, %r9d
movl $0, %r8d
movq -68(%rbp), %rdx
movq -80(%rbp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L43
.L31:
movl $1024, -68(%rbp)
movl $1, -64(%rbp)
movl $1, -60(%rbp)
movl $1, -80(%rbp)
movl $1, -76(%rbp)
movl $1, -72(%rbp)
movl $0, %r9d
movl $0, %r8d
movq -68(%rbp), %rdx
movl $1, %ecx
movq -80(%rbp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L44
.L32:
leaq -68(%rbp), %rdi
movl $2, %ecx
movl $4, %edx
movq -88(%rbp), %rsi
call cudaMemcpy@PLT
movl -124(%rbp), %esi
testl %esi, %esi
jle .L38
movslq %esi, %rsi
salq $2, %rsi
movl $0, %eax
movl $0, %ecx
.L34:
movl (%r12,%rax), %edx
imull 0(%r13,%rax), %edx
addl %edx, %ecx
addq $4, %rax
cmpq %rax, %rsi
jne .L34
.L33:
cmpl %ecx, -68(%rbp)
je .L45
leaq .LC6(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
.L36:
movq -120(%rbp), %rdi
call cudaFree@PLT
movq -112(%rbp), %rdi
call cudaFree@PLT
movq -104(%rbp), %rdi
call cudaFree@PLT
movq -96(%rbp), %rdi
call cudaFree@PLT
movq -88(%rbp), %rdi
call cudaFree@PLT
movq -56(%rbp), %rax
subq %fs:40, %rax
jne .L46
movl $0, %eax
leaq -40(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
.cfi_remember_state
.cfi_def_cfa 7, 8
ret
.L42:
.cfi_restore_state
movl -124(%rbp), %ecx
movq -104(%rbp), %rdx
movq -112(%rbp), %rsi
movq -120(%rbp), %rdi
call _Z27__device_stub__Z3DotPiS_S_iPiS_S_i
jmp .L29
.L43:
movl -124(%rbp), %edx
movq -96(%rbp), %rsi
movq -104(%rbp), %rdi
call _Z25__device_stub__Z3AddPiS_iPiS_i
jmp .L31
.L44:
pxor %xmm0, %xmm0
cvtsi2sdl %r15d, %xmm0
mulsd .LC1(%rip), %xmm0
call ceil@PLT
cvttsd2sil %xmm0, %edx
movq -88(%rbp), %rsi
movq -96(%rbp), %rdi
call _Z25__device_stub__Z3AddPiS_iPiS_i
jmp .L32
.L38:
movl $0, %ecx
jmp .L33
.L45:
leaq .LC5(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
jmp .L36
.L46:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1
.LC8:
.string "_Z3AddPiS_i"
.LC9:
.string "_Z3DotPiS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3699:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z3AddPiS_i(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq _Z3DotPiS_S_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3699:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC1:
.long 0
.long 1062207488
.align 8
.LC2:
.long 0
.long 1127219200
.align 8
.LC4:
.long 0
.long 1072693248
.align 8
.LC7:
.long -1
.long 2147483647
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "Resultant_Sum_of_dot_product.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z18__device_stub__DotPiS_S_i # -- Begin function _Z18__device_stub__DotPiS_S_i
.p2align 4, 0x90
.type _Z18__device_stub__DotPiS_S_i,@function
_Z18__device_stub__DotPiS_S_i: # @_Z18__device_stub__DotPiS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3DotPiS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z18__device_stub__DotPiS_S_i, .Lfunc_end0-_Z18__device_stub__DotPiS_S_i
.cfi_endproc
# -- End function
.globl _Z18__device_stub__AddPiS_i # -- Begin function _Z18__device_stub__AddPiS_i
.p2align 4, 0x90
.type _Z18__device_stub__AddPiS_i,@function
_Z18__device_stub__AddPiS_i: # @_Z18__device_stub__AddPiS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3AddPiS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z18__device_stub__AddPiS_i, .Lfunc_end1-_Z18__device_stub__AddPiS_i
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI2_0:
.quad 0x3f50000000000000 # double 9.765625E-4
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $184, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $13, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
leaq -44(%rbp), %rsi
movl $_ZSt3cin, %edi
callq _ZNSirsERi
movq %rsp, %r13
movl -44(%rbp), %eax
movq %rsp, %rbx
leaq 15(,%rax,4), %rax
andq $-16, %rax
subq %rax, %rbx
movq %rbx, %rsp
movl -44(%rbp), %eax
movq %rsp, %r14
leaq 15(,%rax,4), %rcx
andq $-16, %rcx
subq %rcx, %r14
movq %r14, %rsp
cvtsi2sd %eax, %xmm0
leal (,%rax,4), %r12d
mulsd .LCPI2_0(%rip), %xmm0
callq ceil@PLT
movsd %xmm0, -216(%rbp) # 8-byte Spill
cmpl $0, -44(%rbp)
jle .LBB2_3
# %bb.1: # %.lr.ph.preheader
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB2_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $34, %rcx
addl %edx, %ecx
addl %ecx, %ecx
leal (%rcx,%rcx,4), %ecx
subl %ecx, %eax
movl %eax, (%rbx,%r15,4)
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $34, %rcx
addl %edx, %ecx
addl %ecx, %ecx
leal (%rcx,%rcx,4), %ecx
subl %ecx, %eax
movl %eax, (%r14,%r15,4)
incq %r15
movslq -44(%rbp), %rax
cmpq %rax, %r15
jl .LBB2_2
.LBB2_3: # %._crit_edge
movabsq $4294968320, %r15 # imm = 0x100000400
movslq %r12d, %r12
leaq -160(%rbp), %rdi
movq %r12, %rsi
callq hipMalloc
leaq -152(%rbp), %rdi
movq %r12, %rsi
callq hipMalloc
leaq -144(%rbp), %rdi
movq %r12, %rsi
callq hipMalloc
leaq -136(%rbp), %rdi
movq %r12, %rsi
callq hipMalloc
leaq -128(%rbp), %rdi
movl $4, %esi
callq hipMalloc
movq -160(%rbp), %rdi
movq %rbx, %rsi
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
movq -152(%rbp), %rdi
movq %r14, %rsi
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
xorps %xmm0, %xmm0
cvtsi2sdl -44(%rbp), %xmm0
mulsd .LCPI2_0(%rip), %xmm0
callq ceil@PLT
cvttsd2si %xmm0, %eax
leaq (%rax,%r15), %rdi
addq $-1024, %rdi # imm = 0xFC00
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_5
# %bb.4:
movq -160(%rbp), %rax
movq -152(%rbp), %rcx
movq -144(%rbp), %rdx
movl -44(%rbp), %esi
movq %rax, -120(%rbp)
movq %rcx, -112(%rbp)
movq %rdx, -72(%rbp)
movl %esi, -164(%rbp)
leaq -120(%rbp), %rax
movq %rax, -208(%rbp)
leaq -112(%rbp), %rax
movq %rax, -200(%rbp)
leaq -72(%rbp), %rax
movq %rax, -192(%rbp)
leaq -164(%rbp), %rax
movq %rax, -184(%rbp)
leaq -104(%rbp), %rdi
leaq -88(%rbp), %rsi
leaq -64(%rbp), %rdx
leaq -56(%rbp), %rcx
callq __hipPopCallConfiguration
movq -104(%rbp), %rsi
movl -96(%rbp), %edx
movq -88(%rbp), %rcx
movl -80(%rbp), %r8d
leaq -208(%rbp), %r9
movl $_Z3DotPiS_S_i, %edi
pushq -56(%rbp)
pushq -64(%rbp)
callq hipLaunchKernel
addq $16, %rsp
.LBB2_5:
xorps %xmm0, %xmm0
cvtsi2sdl -44(%rbp), %xmm0
mulsd .LCPI2_0(%rip), %xmm0
callq ceil@PLT
cvttsd2si %xmm0, %eax
leaq (%r15,%rax), %rdi
addq $-1024, %rdi # imm = 0xFC00
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_7
# %bb.6:
movq -144(%rbp), %rax
movq -136(%rbp), %rcx
movl -44(%rbp), %edx
movq %rax, -120(%rbp)
movq %rcx, -112(%rbp)
movl %edx, -56(%rbp)
leaq -120(%rbp), %rax
movq %rax, -208(%rbp)
leaq -112(%rbp), %rax
movq %rax, -200(%rbp)
leaq -56(%rbp), %rax
movq %rax, -192(%rbp)
leaq -104(%rbp), %rdi
leaq -88(%rbp), %rsi
leaq -72(%rbp), %rdx
leaq -64(%rbp), %rcx
callq __hipPopCallConfiguration
movq -104(%rbp), %rsi
movl -96(%rbp), %edx
movq -88(%rbp), %rcx
movl -80(%rbp), %r8d
leaq -208(%rbp), %r9
movl $_Z3AddPiS_i, %edi
pushq -64(%rbp)
pushq -72(%rbp)
callq hipLaunchKernel
addq $16, %rsp
.LBB2_7:
leaq -1023(%r15), %rdi
xorl %r12d, %r12d
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_9
# %bb.8:
cvttsd2si -216(%rbp), %eax # 8-byte Folded Reload
movq -136(%rbp), %rcx
movq -128(%rbp), %rdx
movq %rcx, -120(%rbp)
movq %rdx, -112(%rbp)
movl %eax, -56(%rbp)
leaq -120(%rbp), %rax
movq %rax, -208(%rbp)
leaq -112(%rbp), %rax
movq %rax, -200(%rbp)
leaq -56(%rbp), %rax
movq %rax, -192(%rbp)
leaq -104(%rbp), %rdi
leaq -88(%rbp), %rsi
leaq -72(%rbp), %rdx
leaq -64(%rbp), %rcx
callq __hipPopCallConfiguration
movq -104(%rbp), %rsi
movl -96(%rbp), %edx
movq -88(%rbp), %rcx
movl -80(%rbp), %r8d
leaq -208(%rbp), %r9
movl $_Z3AddPiS_i, %edi
pushq -64(%rbp)
pushq -72(%rbp)
callq hipLaunchKernel
addq $16, %rsp
.LBB2_9:
movq -128(%rbp), %rsi
leaq -208(%rbp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
movl -44(%rbp), %eax
testl %eax, %eax
jle .LBB2_12
# %bb.10: # %.lr.ph63.preheader
xorl %ecx, %ecx
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB2_11: # %.lr.ph63
# =>This Inner Loop Header: Depth=1
movl (%r14,%rcx,4), %edx
imull (%rbx,%rcx,4), %edx
addl %edx, %r12d
incq %rcx
cmpq %rcx, %rax
jne .LBB2_11
.LBB2_12: # %._crit_edge64
cmpl %r12d, -208(%rbp)
jne .LBB2_14
# %bb.13:
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $14, %edx
jmp .LBB2_15
.LBB2_14:
movl $_ZSt4cout, %edi
movl $.L.str.2, %esi
movl $7, %edx
.LBB2_15:
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq -160(%rbp), %rdi
callq hipFree
movq -152(%rbp), %rdi
callq hipFree
movq -144(%rbp), %rdi
callq hipFree
movq -136(%rbp), %rdi
callq hipFree
movq -128(%rbp), %rdi
callq hipFree
movq %r13, %rsp
xorl %eax, %eax
leaq -40(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
.cfi_def_cfa %rsp, 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3DotPiS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3AddPiS_i, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3DotPiS_S_i,@object # @_Z3DotPiS_S_i
.section .rodata,"a",@progbits
.globl _Z3DotPiS_S_i
.p2align 3, 0x0
_Z3DotPiS_S_i:
.quad _Z18__device_stub__DotPiS_S_i
.size _Z3DotPiS_S_i, 8
.type _Z3AddPiS_i,@object # @_Z3AddPiS_i
.globl _Z3AddPiS_i
.p2align 3, 0x0
_Z3AddPiS_i:
.quad _Z18__device_stub__AddPiS_i
.size _Z3AddPiS_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Enter size : "
.size .L.str, 14
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Correct result"
.size .L.str.1, 15
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Invalid"
.size .L.str.2, 8
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3DotPiS_S_i"
.size .L__unnamed_1, 14
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z3AddPiS_i"
.size .L__unnamed_2, 12
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__DotPiS_S_i
.addrsig_sym _Z18__device_stub__AddPiS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3DotPiS_S_i
.addrsig_sym _Z3AddPiS_i
.addrsig_sym _ZSt4cout
.addrsig_sym _ZSt3cin
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void g_getSoftMaxP(float* softMaxP, float* b, int cols, int row){
int bid = blockIdx.x;
extern __shared__ float _share[];
float * _max = _share;
float * _sum = _share + blockDim.x;
float* sp = softMaxP + bid;
_sum[threadIdx.x] = 0.0;
_max[threadIdx.x] = -100000000.0;
for(int tid = threadIdx.x * cols + blockIdx.x; tid < row * cols; tid += cols){
//int id = tid + threadIdx.x;
//if(id < cols){
sp[tid] += b[tid];
_max[threadIdx.x] = max(_max[threadIdx.x], sp[tid]);
//}
}
__syncthreads();
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
{
if(_max[threadIdx.x] < _max[threadIdx.x + skip])
{
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
}
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = threadIdx.x * cols + blockIdx.x; tid < row * cols; tid += cols){
// int id = tid + threadIdx.x;
//if(id < cols){
sp[tid] -= _max[0];
sp[tid] = __expf(sp[tid]);
_sum[threadIdx.x] += sp[tid];
//}
}
__syncthreads();
len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
{
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = threadIdx.x * cols + blockIdx.x; tid < row * cols; tid += cols){
//int id = tid + threadIdx.x;
//if(id < cols){
sp[tid] /= _sum[0];
//}
}
} | .file "tmpxft_001256e6_00000000-6_g_getSoftMaxP.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z37__device_stub__Z13g_getSoftMaxPPfS_iiPfS_ii
.type _Z37__device_stub__Z13g_getSoftMaxPPfS_iiPfS_ii, @function
_Z37__device_stub__Z13g_getSoftMaxPPfS_iiPfS_ii:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z13g_getSoftMaxPPfS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z37__device_stub__Z13g_getSoftMaxPPfS_iiPfS_ii, .-_Z37__device_stub__Z13g_getSoftMaxPPfS_iiPfS_ii
.globl _Z13g_getSoftMaxPPfS_ii
.type _Z13g_getSoftMaxPPfS_ii, @function
_Z13g_getSoftMaxPPfS_ii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z13g_getSoftMaxPPfS_iiPfS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z13g_getSoftMaxPPfS_ii, .-_Z13g_getSoftMaxPPfS_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z13g_getSoftMaxPPfS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z13g_getSoftMaxPPfS_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void g_getSoftMaxP(float* softMaxP, float* b, int cols, int row){
int bid = blockIdx.x;
extern __shared__ float _share[];
float * _max = _share;
float * _sum = _share + blockDim.x;
float* sp = softMaxP + bid;
_sum[threadIdx.x] = 0.0;
_max[threadIdx.x] = -100000000.0;
for(int tid = threadIdx.x * cols + blockIdx.x; tid < row * cols; tid += cols){
//int id = tid + threadIdx.x;
//if(id < cols){
sp[tid] += b[tid];
_max[threadIdx.x] = max(_max[threadIdx.x], sp[tid]);
//}
}
__syncthreads();
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
{
if(_max[threadIdx.x] < _max[threadIdx.x + skip])
{
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
}
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = threadIdx.x * cols + blockIdx.x; tid < row * cols; tid += cols){
// int id = tid + threadIdx.x;
//if(id < cols){
sp[tid] -= _max[0];
sp[tid] = __expf(sp[tid]);
_sum[threadIdx.x] += sp[tid];
//}
}
__syncthreads();
len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
{
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = threadIdx.x * cols + blockIdx.x; tid < row * cols; tid += cols){
//int id = tid + threadIdx.x;
//if(id < cols){
sp[tid] /= _sum[0];
//}
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void g_getSoftMaxP(float* softMaxP, float* b, int cols, int row){
int bid = blockIdx.x;
extern __shared__ float _share[];
float * _max = _share;
float * _sum = _share + blockDim.x;
float* sp = softMaxP + bid;
_sum[threadIdx.x] = 0.0;
_max[threadIdx.x] = -100000000.0;
for(int tid = threadIdx.x * cols + blockIdx.x; tid < row * cols; tid += cols){
//int id = tid + threadIdx.x;
//if(id < cols){
sp[tid] += b[tid];
_max[threadIdx.x] = max(_max[threadIdx.x], sp[tid]);
//}
}
__syncthreads();
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
{
if(_max[threadIdx.x] < _max[threadIdx.x + skip])
{
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
}
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = threadIdx.x * cols + blockIdx.x; tid < row * cols; tid += cols){
// int id = tid + threadIdx.x;
//if(id < cols){
sp[tid] -= _max[0];
sp[tid] = __expf(sp[tid]);
_sum[threadIdx.x] += sp[tid];
//}
}
__syncthreads();
len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
{
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = threadIdx.x * cols + blockIdx.x; tid < row * cols; tid += cols){
//int id = tid + threadIdx.x;
//if(id < cols){
sp[tid] /= _sum[0];
//}
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void g_getSoftMaxP(float* softMaxP, float* b, int cols, int row){
int bid = blockIdx.x;
extern __shared__ float _share[];
float * _max = _share;
float * _sum = _share + blockDim.x;
float* sp = softMaxP + bid;
_sum[threadIdx.x] = 0.0;
_max[threadIdx.x] = -100000000.0;
for(int tid = threadIdx.x * cols + blockIdx.x; tid < row * cols; tid += cols){
//int id = tid + threadIdx.x;
//if(id < cols){
sp[tid] += b[tid];
_max[threadIdx.x] = max(_max[threadIdx.x], sp[tid]);
//}
}
__syncthreads();
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
{
if(_max[threadIdx.x] < _max[threadIdx.x + skip])
{
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
}
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = threadIdx.x * cols + blockIdx.x; tid < row * cols; tid += cols){
// int id = tid + threadIdx.x;
//if(id < cols){
sp[tid] -= _max[0];
sp[tid] = __expf(sp[tid]);
_sum[threadIdx.x] += sp[tid];
//}
}
__syncthreads();
len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
{
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = threadIdx.x * cols + blockIdx.x; tid < row * cols; tid += cols){
//int id = tid + threadIdx.x;
//if(id < cols){
sp[tid] /= _sum[0];
//}
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13g_getSoftMaxPPfS_ii
.globl _Z13g_getSoftMaxPPfS_ii
.p2align 8
.type _Z13g_getSoftMaxPPfS_ii,@function
_Z13g_getSoftMaxPPfS_ii:
s_clause 0x2
s_load_b64 s[2:3], s[0:1], 0x10
s_load_b32 s14, s[0:1], 0x24
s_load_b64 s[6:7], s[0:1], 0x0
s_mov_b32 s4, s15
s_ashr_i32 s5, s15, 31
v_dual_mov_b32 v3, 0 :: v_dual_lshlrev_b32 v4, 2, v0
v_mov_b32_e32 v7, 0xccbebc20
s_mov_b32 s16, 0
s_mov_b32 s15, exec_lo
s_delay_alu instid0(VALU_DEP_2)
v_add_nc_u32_e32 v6, 0, v4
s_waitcnt lgkmcnt(0)
v_mad_u64_u32 v[1:2], null, v0, s2, s[4:5]
s_and_b32 s8, s14, 0xffff
s_mul_i32 s12, s3, s2
s_lshl_b32 s8, s8, 2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s13, s8, 0
v_add_nc_u32_e32 v5, s13, v4
s_delay_alu instid0(VALU_DEP_2)
v_ashrrev_i32_e32 v2, 31, v1
ds_store_b32 v5, v3
ds_store_b32 v6, v7
v_cmpx_gt_i32_e64 s12, v1
s_cbranch_execz .LBB0_4
s_load_b64 s[8:9], s[0:1], 0x8
ds_load_b32 v7, v6
s_lshl_b64 s[10:11], s[4:5], 2
v_lshlrev_b64 v[3:4], 2, v[1:2]
s_add_u32 s1, s6, s10
v_mov_b32_e32 v8, v1
s_addc_u32 s17, s7, s11
s_ashr_i32 s3, s2, 31
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[10:11], s[2:3], 2
.p2align 6
.LBB0_2:
s_waitcnt lgkmcnt(0)
v_add_co_u32 v9, vcc_lo, s8, v3
v_add_co_ci_u32_e32 v10, vcc_lo, s9, v4, vcc_lo
v_add_co_u32 v11, vcc_lo, s1, v3
v_add_co_ci_u32_e32 v12, vcc_lo, s17, v4, vcc_lo
v_dual_max_f32 v7, v7, v7 :: v_dual_add_nc_u32 v8, s2, v8
global_load_b32 v9, v[9:10], off
global_load_b32 v10, v[11:12], off
v_add_co_u32 v3, vcc_lo, v3, s10
v_cmp_le_i32_e64 s0, s12, v8
v_add_co_ci_u32_e32 v4, vcc_lo, s11, v4, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
s_or_b32 s16, s0, s16
s_waitcnt vmcnt(0)
v_add_f32_e32 v9, v9, v10
v_max_f32_e32 v7, v7, v9
global_store_b32 v[11:12], v9, off
s_and_not1_b32 exec_lo, exec_lo, s16
s_cbranch_execnz .LBB0_2
s_or_b32 exec_lo, exec_lo, s16
ds_store_b32 v6, v7
.LBB0_4:
s_or_b32 exec_lo, exec_lo, s15
v_cmp_eq_u16_e64 s0, s14, 1
s_and_b32 s1, 0xffff, s14
s_waitcnt lgkmcnt(0)
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_and_b32 vcc_lo, exec_lo, s0
s_cbranch_vccnz .LBB0_10
v_lshl_add_u32 v3, v0, 2, 0
s_mov_b32 s0, s1
s_branch .LBB0_7
.p2align 6
.LBB0_6:
s_or_b32 exec_lo, exec_lo, s3
s_cmp_lg_u32 s0, 1
s_cbranch_scc0 .LBB0_10
.LBB0_7:
s_lshr_b32 s3, s0, 1
s_add_i32 s0, s0, 1
v_cmp_gt_u32_e32 vcc_lo, s3, v0
s_lshr_b32 s0, s0, 1
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_and_saveexec_b32 s3, vcc_lo
s_cbranch_execz .LBB0_6
v_lshl_add_u32 v4, s0, 2, v3
ds_load_b32 v7, v6
ds_load_b32 v4, v4
s_waitcnt lgkmcnt(0)
v_cmp_lt_f32_e32 vcc_lo, v7, v4
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_6
ds_store_b32 v6, v4
s_branch .LBB0_6
.LBB0_10:
s_mov_b32 s10, exec_lo
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_cmpx_gt_i32_e64 s12, v1
s_cbranch_execz .LBB0_13
v_lshlrev_b64 v[3:4], 2, v[1:2]
s_lshl_b64 s[8:9], s[4:5], 2
v_dual_mov_b32 v6, 0 :: v_dual_mov_b32 v7, v1
s_add_u32 s0, s6, s8
s_addc_u32 s3, s7, s9
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_add_co_u32 v3, vcc_lo, s0, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v4, vcc_lo
s_ashr_i32 s3, s2, 31
s_lshl_b64 s[8:9], s[2:3], 2
s_mov_b32 s3, 0
.p2align 6
.LBB0_12:
global_load_b32 v8, v[3:4], off
ds_load_b32 v9, v6
ds_load_b32 v10, v5
v_add_nc_u32_e32 v7, s2, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_cmp_le_i32_e32 vcc_lo, s12, v7
s_or_b32 s3, vcc_lo, s3
s_waitcnt vmcnt(0) lgkmcnt(1)
v_sub_f32_e32 v8, v8, v9
v_mul_f32_e32 v8, 0x3fb8aa3b, v8
s_delay_alu instid0(VALU_DEP_1)
v_exp_f32_e32 v8, v8
global_store_b32 v[3:4], v8, off
v_add_co_u32 v3, s0, v3, s8
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v8, v10, v8
v_add_co_ci_u32_e64 v4, s0, s9, v4, s0
ds_store_b32 v5, v8
s_and_not1_b32 exec_lo, exec_lo, s3
s_cbranch_execnz .LBB0_12
.LBB0_13:
s_or_b32 exec_lo, exec_lo, s10
s_cmp_eq_u32 s1, 1
s_waitcnt lgkmcnt(0)
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB0_18
v_lshl_add_u32 v3, v0, 2, s13
s_branch .LBB0_16
.p2align 6
.LBB0_15:
s_or_b32 exec_lo, exec_lo, s0
s_cmp_lg_u32 s1, 1
s_cbranch_scc0 .LBB0_18
.LBB0_16:
s_lshr_b32 s0, s1, 1
s_add_i32 s1, s1, 1
v_cmp_gt_u32_e32 vcc_lo, s0, v0
s_lshr_b32 s1, s1, 1
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_and_saveexec_b32 s0, vcc_lo
s_cbranch_execz .LBB0_15
v_lshl_add_u32 v4, s1, 2, v3
ds_load_b32 v4, v4
ds_load_b32 v6, v5
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v4, v4, v6
ds_store_b32 v5, v4
s_branch .LBB0_15
.LBB0_18:
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_mov_b32 s0, exec_lo
v_cmpx_gt_i32_e64 s12, v1
s_cbranch_execz .LBB0_21
v_mov_b32_e32 v0, s13
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_lshl_b64 s[0:1], s[4:5], 2
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s0, s6, s0
ds_load_b32 v0, v0
s_addc_u32 s1, s7, s1
v_add_co_u32 v2, vcc_lo, s0, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
s_ashr_i32 s3, s2, 31
s_mov_b32 s1, 0
s_lshl_b64 s[4:5], s[2:3], 2
.p2align 6
.LBB0_20:
global_load_b32 v4, v[2:3], off
v_add_nc_u32_e32 v1, s2, v1
s_waitcnt vmcnt(0) lgkmcnt(0)
v_div_scale_f32 v5, null, v0, v0, v4
v_div_scale_f32 v8, vcc_lo, v4, v0, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_f32_e32 v6, v5
s_waitcnt_depctr 0xfff
v_fma_f32 v7, -v5, v6, 1.0
v_fmac_f32_e32 v6, v7, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v7, v8, v6
v_fma_f32 v9, -v5, v7, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v7, v9, v6
v_fma_f32 v5, -v5, v7, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_div_fmas_f32 v5, v5, v6, v7
v_cmp_le_i32_e32 vcc_lo, s12, v1
v_div_fixup_f32 v4, v5, v0, v4
s_or_b32 s1, vcc_lo, s1
global_store_b32 v[2:3], v4, off
v_add_co_u32 v2, s0, v2, s4
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v3, s0, s5, v3, s0
s_and_not1_b32 exec_lo, exec_lo, s1
s_cbranch_execnz .LBB0_20
.LBB0_21:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13g_getSoftMaxPPfS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 13
.amdhsa_next_free_sgpr 18
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13g_getSoftMaxPPfS_ii, .Lfunc_end0-_Z13g_getSoftMaxPPfS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
- .offset: 144
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13g_getSoftMaxPPfS_ii
.private_segment_fixed_size: 0
.sgpr_count: 20
.sgpr_spill_count: 0
.symbol: _Z13g_getSoftMaxPPfS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 13
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void g_getSoftMaxP(float* softMaxP, float* b, int cols, int row){
int bid = blockIdx.x;
extern __shared__ float _share[];
float * _max = _share;
float * _sum = _share + blockDim.x;
float* sp = softMaxP + bid;
_sum[threadIdx.x] = 0.0;
_max[threadIdx.x] = -100000000.0;
for(int tid = threadIdx.x * cols + blockIdx.x; tid < row * cols; tid += cols){
//int id = tid + threadIdx.x;
//if(id < cols){
sp[tid] += b[tid];
_max[threadIdx.x] = max(_max[threadIdx.x], sp[tid]);
//}
}
__syncthreads();
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
{
if(_max[threadIdx.x] < _max[threadIdx.x + skip])
{
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
}
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = threadIdx.x * cols + blockIdx.x; tid < row * cols; tid += cols){
// int id = tid + threadIdx.x;
//if(id < cols){
sp[tid] -= _max[0];
sp[tid] = __expf(sp[tid]);
_sum[threadIdx.x] += sp[tid];
//}
}
__syncthreads();
len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
{
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = threadIdx.x * cols + blockIdx.x; tid < row * cols; tid += cols){
//int id = tid + threadIdx.x;
//if(id < cols){
sp[tid] /= _sum[0];
//}
}
} | .text
.file "g_getSoftMaxP.hip"
.globl _Z28__device_stub__g_getSoftMaxPPfS_ii # -- Begin function _Z28__device_stub__g_getSoftMaxPPfS_ii
.p2align 4, 0x90
.type _Z28__device_stub__g_getSoftMaxPPfS_ii,@function
_Z28__device_stub__g_getSoftMaxPPfS_ii: # @_Z28__device_stub__g_getSoftMaxPPfS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z13g_getSoftMaxPPfS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z28__device_stub__g_getSoftMaxPPfS_ii, .Lfunc_end0-_Z28__device_stub__g_getSoftMaxPPfS_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13g_getSoftMaxPPfS_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z13g_getSoftMaxPPfS_ii,@object # @_Z13g_getSoftMaxPPfS_ii
.section .rodata,"a",@progbits
.globl _Z13g_getSoftMaxPPfS_ii
.p2align 3, 0x0
_Z13g_getSoftMaxPPfS_ii:
.quad _Z28__device_stub__g_getSoftMaxPPfS_ii
.size _Z13g_getSoftMaxPPfS_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z13g_getSoftMaxPPfS_ii"
.size .L__unnamed_1, 24
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__g_getSoftMaxPPfS_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13g_getSoftMaxPPfS_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001256e6_00000000-6_g_getSoftMaxP.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z37__device_stub__Z13g_getSoftMaxPPfS_iiPfS_ii
.type _Z37__device_stub__Z13g_getSoftMaxPPfS_iiPfS_ii, @function
_Z37__device_stub__Z13g_getSoftMaxPPfS_iiPfS_ii:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z13g_getSoftMaxPPfS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z37__device_stub__Z13g_getSoftMaxPPfS_iiPfS_ii, .-_Z37__device_stub__Z13g_getSoftMaxPPfS_iiPfS_ii
.globl _Z13g_getSoftMaxPPfS_ii
.type _Z13g_getSoftMaxPPfS_ii, @function
_Z13g_getSoftMaxPPfS_ii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z13g_getSoftMaxPPfS_iiPfS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z13g_getSoftMaxPPfS_ii, .-_Z13g_getSoftMaxPPfS_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z13g_getSoftMaxPPfS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z13g_getSoftMaxPPfS_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "g_getSoftMaxP.hip"
.globl _Z28__device_stub__g_getSoftMaxPPfS_ii # -- Begin function _Z28__device_stub__g_getSoftMaxPPfS_ii
.p2align 4, 0x90
.type _Z28__device_stub__g_getSoftMaxPPfS_ii,@function
_Z28__device_stub__g_getSoftMaxPPfS_ii: # @_Z28__device_stub__g_getSoftMaxPPfS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z13g_getSoftMaxPPfS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z28__device_stub__g_getSoftMaxPPfS_ii, .Lfunc_end0-_Z28__device_stub__g_getSoftMaxPPfS_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13g_getSoftMaxPPfS_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z13g_getSoftMaxPPfS_ii,@object # @_Z13g_getSoftMaxPPfS_ii
.section .rodata,"a",@progbits
.globl _Z13g_getSoftMaxPPfS_ii
.p2align 3, 0x0
_Z13g_getSoftMaxPPfS_ii:
.quad _Z28__device_stub__g_getSoftMaxPPfS_ii
.size _Z13g_getSoftMaxPPfS_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z13g_getSoftMaxPPfS_ii"
.size .L__unnamed_1, 24
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__g_getSoftMaxPPfS_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13g_getSoftMaxPPfS_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | extern "C"{
#define RGB2GRAY_CONST_ARR_SIZE 3
#define STRONG_EDGE 255
#define NON_EDGE 0.0
#define KERNEL_SIZE 7
//*****************************************************************************************
// CUDA Gaussian Filter Implementation
//*****************************************************************************************
///
/// \brief Apply gaussian filter. This is the CUDA kernel for applying a gaussian blur to an image.
///
__global__ void cu_apply_gaussian_filter(float3 *in_pixels, float3 *out_pixels, int rows, int cols, double *in_kernel)
{
//copy kernel array from global memory to a shared array
__shared__ double kernel[KERNEL_SIZE][KERNEL_SIZE];
for (int i = 0; i < KERNEL_SIZE; ++i) {
for (int j = 0; j < KERNEL_SIZE; ++j) {
kernel[i][j] = in_kernel[i * KERNEL_SIZE + j];
}
}
__syncthreads();
//determine id of thread which corresponds to an individual pixel
int pixNum = blockIdx.x * blockDim.x + threadIdx.x;
if (pixNum >= 0 && pixNum < rows * cols) {
double kernelSum;
double redPixelVal;
double greenPixelVal;
double bluePixelVal;
//Apply Kernel to each pixel of image
for (int i = 0; i < KERNEL_SIZE; ++i) {
for (int j = 0; j < KERNEL_SIZE; ++j) {
//check edge cases, if within bounds, apply filter
if (((pixNum + ((i - ((KERNEL_SIZE - 1) / 2))*cols) + j - ((KERNEL_SIZE - 1) / 2)) >= 0)
&& ((pixNum + ((i - ((KERNEL_SIZE - 1) / 2))*cols) + j - ((KERNEL_SIZE - 1) / 2)) <= rows*cols-1)
&& (((pixNum % cols) + j - ((KERNEL_SIZE-1)/2)) >= 0)
&& (((pixNum % cols) + j - ((KERNEL_SIZE-1)/2)) <= (cols-1))) {
redPixelVal += kernel[i][j] * in_pixels[pixNum + ((i - ((KERNEL_SIZE - 1) / 2))*cols) + j - ((KERNEL_SIZE - 1) / 2)].x;
greenPixelVal += kernel[i][j] * in_pixels[pixNum + ((i - ((KERNEL_SIZE - 1) / 2))*cols) + j - ((KERNEL_SIZE - 1) / 2)].y;
bluePixelVal += kernel[i][j] * in_pixels[pixNum + ((i - ((KERNEL_SIZE - 1) / 2))*cols) + j - ((KERNEL_SIZE - 1) / 2)].z;
kernelSum += kernel[i][j];
}
}
}
//update output image
out_pixels[pixNum].x = redPixelVal / kernelSum;
out_pixels[pixNum].y = greenPixelVal / kernelSum;
out_pixels[pixNum].z = bluePixelVal / kernelSum;
}
}
//*****************************************************************************************
// CUDA Intensity Gradient Implementation
//*****************************************************************************************
///
/// \brief Compute gradient (first order derivative x and y). This is the CUDA kernel for taking the derivative of color contrasts in adjacent images.
///
__global__
void cu_compute_intensity_gradient(float3 *in_pixels, float *deltaX_channel, float *deltaY_channel, int parser_length, int offset)
{
// compute delta X ***************************
// deltaX = f(x+1) - f(x-1)
int idx = blockIdx.x * blockDim.x + threadIdx.x;
/* condition here skips first and last row */
if ((idx > offset) && (idx < (parser_length * offset) - offset))
{
float deltaXred = 0;
float deltaYred = 0;
float deltaXgreen = 0;
float deltaYgreen = 0;
float deltaXblue = 0;
float deltaYblue = 0;
/* first column */
if((idx % offset) == 0)
{
// gradient at the first pixel of each line
// note: at the edge pix[idx-1] does NOT exist
deltaXred = (float)(in_pixels[idx+1].x - in_pixels[idx].x);
deltaXgreen = (float)(in_pixels[idx+1].y - in_pixels[idx].y);
deltaXblue = (float)(in_pixels[idx+1].z - in_pixels[idx].z);
// gradient at the first pixel of each line
// note: at the edge pix[idx-1] does NOT exist
deltaYred = (float)(in_pixels[idx+offset].x - in_pixels[idx].x);
deltaYgreen = (float)(in_pixels[idx+offset].y - in_pixels[idx].y);
deltaYblue = (float)(in_pixels[idx+offset].z - in_pixels[idx].z);
}
/* last column */
else if((idx % offset) == (offset - 1))
{
deltaXred = (float)(in_pixels[idx].x - in_pixels[idx-1].x);
deltaXgreen = (float)(in_pixels[idx].y - in_pixels[idx-1].y);
deltaXblue = (float)(in_pixels[idx].z - in_pixels[idx-1].z);
deltaYred = (float)(in_pixels[idx].x - in_pixels[idx-offset].x);
deltaYgreen = (float)(in_pixels[idx].y - in_pixels[idx-offset].y);
deltaYblue = (float)(in_pixels[idx].z - in_pixels[idx-offset].z);
}
/* gradients where NOT edge */
else
{
deltaXred = (float)(in_pixels[idx+1].x - in_pixels[idx-1].x);
deltaXgreen = (float)(in_pixels[idx+1].y - in_pixels[idx-1].y);
deltaXblue = (float)(in_pixels[idx+1].z - in_pixels[idx-1].z);
deltaYred = (float)(in_pixels[idx+offset].x - in_pixels[idx-offset].x);
deltaYgreen = (float)(in_pixels[idx+offset].y - in_pixels[idx-offset].y);
deltaYblue = (float)(in_pixels[idx+offset].z - in_pixels[idx-offset].z);
}
deltaX_channel[idx] = (float)(0.2989 * deltaXred + 0.5870 * deltaXgreen + 0.1140 * deltaXblue);
deltaY_channel[idx] = (float)(0.2989 * deltaYred + 0.5870 * deltaYgreen + 0.1140 * deltaYblue);
}
}
//*****************************************************************************************
// CUDA Gradient Magnitude Implementation
//*****************************************************************************************
///
/// \brief Compute magnitude of gradient(deltaX & deltaY) per pixel.
///
__global__
void cu_magnitude(float *deltaX, float *deltaY, float *out_pixel, int parser_length, int offset)
{
//computation
//Assigned a thread to each pixel
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= 0 && idx < parser_length * offset) {
out_pixel[idx] = (float)(sqrt((double)deltaX[idx]*deltaX[idx] +
(double)deltaY[idx]*deltaY[idx]) + 0.5);
}
}
//*****************************************************************************************
// CUDA Non Maximal Suppression Implementation
//*****************************************************************************************
///
/// \brief Non Maximal Suppression
/// If the centre pixel is not greater than neighboured pixels in the direction,
/// then the center pixel is set to zero.
/// This process results in one pixel wide ridges.
///
__global__ void cu_suppress_non_max(float *mag, float *deltaX, float *deltaY, float *nms, int parser_length, int offset)
{
const float SUPPRESSED = 0;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= 0 && idx < parser_length * offset)
{
float alpha;
float mag1, mag2;
// put zero all boundaries of image
// TOP edge line of the image
if((idx >= 0) && (idx <offset))
nms[idx] = 0;
// BOTTOM edge line of image
else if((idx >= (parser_length-1)*offset) && (idx < (offset * parser_length)))
nms[idx] = 0;
// LEFT & RIGHT edge line
else if(((idx % offset)==0) || ((idx % offset)==(offset - 1)))
{
nms[idx] = 0;
}
else // not the boundaries
{
// if magnitude = 0, no edge
if(mag[idx] == 0)
nms[idx] = SUPPRESSED;
else{
if(deltaX[idx] >= 0)
{
if(deltaY[idx] >= 0) // dx >= 0, dy >= 0
{
if((deltaX[idx] - deltaY[idx]) >= 0) // direction 1 (SEE, South-East-East)
{
alpha = (float)deltaY[idx] / deltaX[idx];
mag1 = (1-alpha)*mag[idx+1] + alpha*mag[idx+offset+1];
mag2 = (1-alpha)*mag[idx-1] + alpha*mag[idx-offset-1];
}
else // direction 2 (SSE)
{
alpha = (float)deltaX[idx] / deltaY[idx];
mag1 = (1-alpha)*mag[idx+offset] + alpha*mag[idx+offset+1];
mag2 = (1-alpha)*mag[idx-offset] + alpha*mag[idx-offset-1];
}
}
else // dx >= 0, dy < 0
{
if((deltaX[idx] + deltaY[idx]) >= 0) // direction 8 (NEE)
{
alpha = (float)-deltaY[idx] / deltaX[idx];
mag1 = (1-alpha)*mag[idx+1] + alpha*mag[idx-offset+1];
mag2 = (1-alpha)*mag[idx-1] + alpha*mag[idx+offset-1];
}
else // direction 7 (NNE)
{
alpha = (float)deltaX[idx] / -deltaY[idx];
mag1 = (1-alpha)*mag[idx+offset] + alpha*mag[idx+offset-1];
mag2 = (1-alpha)*mag[idx-offset] + alpha*mag[idx-offset+1];
}
}
}
else
{
if(deltaY[idx] >= 0) // dx < 0, dy >= 0
{
if((deltaX[idx] + deltaY[idx]) >= 0) // direction 3 (SSW)
{
alpha = (float)-deltaX[idx] / deltaY[idx];
mag1 = (1-alpha)*mag[idx+offset] + alpha*mag[idx+offset-1];
mag2 = (1-alpha)*mag[idx-offset] + alpha*mag[idx-offset+1];
}
else // direction 4 (SWW)
{
alpha = (float)deltaY[idx] / -deltaX[idx];
mag1 = (1-alpha)*mag[idx-1] + alpha*mag[idx+offset-1];
mag2 = (1-alpha)*mag[idx+1] + alpha*mag[idx-offset+1];
}
}
else // dx < 0, dy < 0
{
if((-deltaX[idx] + deltaY[idx]) >= 0) // direction 5 (NWW)
{
alpha = (float)deltaY[idx] / deltaX[idx];
mag1 = (1-alpha)*mag[idx-1] + alpha*mag[idx-offset-1];
mag2 = (1-alpha)*mag[idx+1] + alpha*mag[idx+offset+1];
}
else // direction 6 (NNW)
{
alpha = (float)deltaX[idx] / deltaY[idx];
mag1 = (1-alpha)*mag[idx-offset] + alpha*mag[idx-offset-1];
mag2 = (1-alpha)*mag[idx+offset] + alpha*mag[idx+offset+1];
}
}
}
// non-maximal suppression
// compare mag1, mag2 and mag[t]
// if mag[t] is smaller than one of the neighbours then suppress it
if((mag[idx] < mag1) || (mag[idx] < mag2))
nms[idx] = SUPPRESSED;
else
{
nms[idx] = mag[idx];
}
} // END OF ELSE (mag != 0)
} // END OF FOR(j)
} // END OF FOR(i)
}
//*****************************************************************************************
// CUDA Hysteresis Implementation
//*****************************************************************************************
///
/// \brief This is a helper function that runs on the GPU.
///
/// It checks if the eight immediate neighbors of a pixel at a given index are above
/// a low threshold, and if they are, sets them to strong edges. This effectively
/// connects the edges.
///
__device__
void trace_immed_neighbors(float *out_pixels, float *in_pixels,
int idx, float t_low, int img_width)
{
/* directions representing indices of neighbors */
unsigned n, s, e, w;
unsigned nw, ne, sw, se;
/* get indices */
n = idx - img_width;
nw = n - 1;
ne = n + 1;
s = idx + img_width;
sw = s - 1;
se = s + 1;
w = idx - 1;
e = idx + 1;
if (in_pixels[nw] >= t_low &&in_pixels[nw]!=255.0 ) {
out_pixels[nw] = STRONG_EDGE;
}
if (in_pixels[n] >= t_low&&in_pixels[n]!=255.0) {
out_pixels[n] = STRONG_EDGE;
}
if (in_pixels[ne] >= t_low&&in_pixels[ne]!=255.0) {
out_pixels[ne] = STRONG_EDGE;
}
if (in_pixels[w] >= t_low&&in_pixels[w]!=255.0) {
out_pixels[w] = STRONG_EDGE;
}
if (in_pixels[e] >= t_low&&in_pixels[e]!=255.0) {
out_pixels[e] = STRONG_EDGE;
}
if (in_pixels[sw] >= t_low&&in_pixels[sw]!=255.0) {
out_pixels[sw] = STRONG_EDGE;
}
if (in_pixels[s] >= t_low&&in_pixels[s]!=255.0) {
out_pixels[s] = STRONG_EDGE;
}
if (in_pixels[se] >= t_low&&in_pixels[se]!=255.0) {
out_pixels[se] = STRONG_EDGE;
}
}
///
/// \brief CUDA implementation of Canny hysteresis high thresholding.
///
/// This kernel is the first pass in the parallel hysteresis step.
/// It launches a thread for every pixel and checks if the value of that pixel
/// is above a high threshold. If it is, the thread marks it as a strong edge (set to 1)
/// in a pixel map and sets the value to the channel max. If it is not, the thread sets
/// the pixel map at the index to 0 and zeros the output buffer space at that index.
///
/// The output of this step is a mask of strong edges and an output buffer with white values
/// at the mask indices which are set.
///
__global__
void cu_hysteresis_high(float *out_pixels, float *in_pixels, float *strong_edge_mask,
float t_high, int img_height, int img_width)
{
//printf("t_high=%f\n",t_high);
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < (img_height * img_width)) {
/* apply high threshold */
//printf("pixel=%f\n",in_pixels[idx]);
if (in_pixels[idx] > t_high) {
strong_edge_mask[idx] = 1.0;
out_pixels[idx] = STRONG_EDGE;
} else {
strong_edge_mask[idx] = 0.0;
out_pixels[idx] = NON_EDGE;
}
}
}
///
/// \brief CUDA implementation of Canny hysteresis low thresholding.
///
/// This kernel is the second pass in the parallel hysteresis step.
/// It launches a thread for every pixel, but skips the first and last rows and columns.
/// For surviving threads, the pixel at the thread ID index is checked to see if it was
/// previously marked as a strong edge in the first pass. If it was, the thread checks
/// their eight immediate neighbors and connects them (marks them as strong edges)
/// if the neighbor is above the low threshold.
///
/// The output of this step is an output buffer with both "strong" and "connected" edges
/// set to whtie values. This is the final edge detected image.
///
__global__
void cu_hysteresis_low(float *out_pixels, float *in_pixels, float *strong_edge_mask,
float t_low, int img_height, int img_width)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if ((idx > img_width) /* skip first row */
&& (idx < (img_height * img_width) - img_width) /* skip last row */
&& ((idx % img_width) < (img_width - 1)) /* skip last column */
&& ((idx % img_width) > (0)) ) /* skip first column */
{
if (1.0 == strong_edge_mask[idx]) { /* if this pixel was previously found to be a strong edge */
trace_immed_neighbors(out_pixels, in_pixels, idx, t_low, img_width);
}
}
}
__global__ void hysteresis_kernel(float* out_pixels,float * in_pixels,float t_low,float t_high,int img_height,int img_width){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if ((idx > img_width) &&
(idx < (img_height * img_width) - img_width) &&
((idx % img_width) < (img_width - 1)) &&
((idx % img_width) > (0)) ){
float pixel = in_pixels[idx];
if (pixel != 255.0){
if (pixel > t_high){
out_pixels[idx] = (float)255.0;
trace_immed_neighbors(out_pixels,in_pixels,idx,t_low,img_width);
}else{
out_pixels[idx] = 0.0;
}
}
}
}
} | .file "tmpxft_000fbdaf_00000000-6_27-canny.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl trace_immed_neighbors
.type trace_immed_neighbors, @function
trace_immed_neighbors:
.LFB2027:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2027:
.size trace_immed_neighbors, .-trace_immed_neighbors
.globl _Z57__device_stub__Z24cu_apply_gaussian_filterP6float3S0_iiPdP6float3S0_iiPd
.type _Z57__device_stub__Z24cu_apply_gaussian_filterP6float3S0_iiPdP6float3S0_iiPd, @function
_Z57__device_stub__Z24cu_apply_gaussian_filterP6float3S0_iiPdP6float3S0_iiPd:
.LFB2052:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %r8, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L9
.L5:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L10
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq cu_apply_gaussian_filter(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L5
.L10:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z57__device_stub__Z24cu_apply_gaussian_filterP6float3S0_iiPdP6float3S0_iiPd, .-_Z57__device_stub__Z24cu_apply_gaussian_filterP6float3S0_iiPdP6float3S0_iiPd
.globl cu_apply_gaussian_filter
.type cu_apply_gaussian_filter, @function
cu_apply_gaussian_filter:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z57__device_stub__Z24cu_apply_gaussian_filterP6float3S0_iiPdP6float3S0_iiPd
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size cu_apply_gaussian_filter, .-cu_apply_gaussian_filter
.globl _Z62__device_stub__Z29cu_compute_intensity_gradientP6float3PfS1_iiP6float3PfS1_ii
.type _Z62__device_stub__Z29cu_compute_intensity_gradientP6float3PfS1_iiP6float3PfS1_ii, @function
_Z62__device_stub__Z29cu_compute_intensity_gradientP6float3PfS1_iiP6float3PfS1_ii:
.LFB2054:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L17
.L13:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L18
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq cu_compute_intensity_gradient(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L13
.L18:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2054:
.size _Z62__device_stub__Z29cu_compute_intensity_gradientP6float3PfS1_iiP6float3PfS1_ii, .-_Z62__device_stub__Z29cu_compute_intensity_gradientP6float3PfS1_iiP6float3PfS1_ii
.globl cu_compute_intensity_gradient
.type cu_compute_intensity_gradient, @function
cu_compute_intensity_gradient:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z62__device_stub__Z29cu_compute_intensity_gradientP6float3PfS1_iiP6float3PfS1_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size cu_compute_intensity_gradient, .-cu_compute_intensity_gradient
.globl _Z38__device_stub__Z12cu_magnitudePfS_S_iiPfS_S_ii
.type _Z38__device_stub__Z12cu_magnitudePfS_S_iiPfS_S_ii, @function
_Z38__device_stub__Z12cu_magnitudePfS_S_iiPfS_S_ii:
.LFB2056:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L25
.L21:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L26
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq cu_magnitude(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L21
.L26:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2056:
.size _Z38__device_stub__Z12cu_magnitudePfS_S_iiPfS_S_ii, .-_Z38__device_stub__Z12cu_magnitudePfS_S_iiPfS_S_ii
.globl cu_magnitude
.type cu_magnitude, @function
cu_magnitude:
.LFB2057:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z12cu_magnitudePfS_S_iiPfS_S_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size cu_magnitude, .-cu_magnitude
.globl _Z47__device_stub__Z19cu_suppress_non_maxPfS_S_S_iiPfS_S_S_ii
.type _Z47__device_stub__Z19cu_suppress_non_maxPfS_S_S_iiPfS_S_S_ii, @function
_Z47__device_stub__Z19cu_suppress_non_maxPfS_S_S_iiPfS_S_S_ii:
.LFB2058:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L33
.L29:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L34
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L33:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq cu_suppress_non_max(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L29
.L34:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size _Z47__device_stub__Z19cu_suppress_non_maxPfS_S_S_iiPfS_S_S_ii, .-_Z47__device_stub__Z19cu_suppress_non_maxPfS_S_S_iiPfS_S_S_ii
.globl cu_suppress_non_max
.type cu_suppress_non_max, @function
cu_suppress_non_max:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z47__device_stub__Z19cu_suppress_non_maxPfS_S_S_iiPfS_S_S_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size cu_suppress_non_max, .-cu_suppress_non_max
.globl _Z45__device_stub__Z18cu_hysteresis_highPfS_S_fiiPfS_S_fii
.type _Z45__device_stub__Z18cu_hysteresis_highPfS_S_fiiPfS_S_fii, @function
_Z45__device_stub__Z18cu_hysteresis_highPfS_S_fiiPfS_S_fii:
.LFB2060:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movss %xmm0, 20(%rsp)
movl %ecx, 16(%rsp)
movl %r8d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L41
.L37:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L42
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L41:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq cu_hysteresis_high(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L37
.L42:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2060:
.size _Z45__device_stub__Z18cu_hysteresis_highPfS_S_fiiPfS_S_fii, .-_Z45__device_stub__Z18cu_hysteresis_highPfS_S_fiiPfS_S_fii
.globl cu_hysteresis_high
.type cu_hysteresis_high, @function
cu_hysteresis_high:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z45__device_stub__Z18cu_hysteresis_highPfS_S_fiiPfS_S_fii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size cu_hysteresis_high, .-cu_hysteresis_high
# Device stub for cu_hysteresis_low(float*, float*, float*, float, int, int).
# Spills the six kernel arguments to the stack, builds the argument-pointer
# array expected by cudaLaunchKernel, pops the <<<grid, block, smem, stream>>>
# configuration pushed by the caller, and launches the kernel.
.globl _Z44__device_stub__Z17cu_hysteresis_lowPfS_S_fiiPfS_S_fii
.type _Z44__device_stub__Z17cu_hysteresis_lowPfS_S_fiiPfS_S_fii, @function
_Z44__device_stub__Z17cu_hysteresis_lowPfS_S_fiiPfS_S_fii:
.LFB2062:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
# spill the incoming ABI registers so each argument has a stable address
movq %rdi, 40(%rsp)           # out_pixels
movq %rsi, 32(%rsp)           # in_pixels
movq %rdx, 24(%rsp)           # strong_edge_mask
movss %xmm0, 20(%rsp)         # t_low
movl %ecx, 16(%rsp)           # img_height
movl %r8d, 12(%rsp)           # img_width
movq %fs:40, %rax             # install stack-protector canary
movq %rax, 168(%rsp)
xorl %eax, %eax
# argument-pointer array at 112(%rsp): one slot per kernel parameter
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
# default 1x1x1 grid/block dims; overwritten by __cudaPopCallConfiguration
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx           # &stream
leaq 48(%rsp), %rdx           # &sharedMem
leaq 76(%rsp), %rsi           # &blockDim
leaq 64(%rsp), %rdi           # &gridDim
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L49                       # 0 => configuration present, do the launch
.L45:
movq 168(%rsp), %rax          # verify stack-protector canary
subq %fs:40, %rax
jne .L50
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L49:
.cfi_restore_state
pushq 56(%rsp)                # stream
.cfi_def_cfa_offset 200
pushq 56(%rsp)                # sharedMem
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9           # args array (offsets shifted by the pushes)
movq 92(%rsp), %rcx           # blockDim
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi           # gridDim
movl 88(%rsp), %edx
leaq cu_hysteresis_low(%rip), %rdi   # host trampoline identifies the kernel
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L45
.L50:
call __stack_chk_fail@PLT     # canary mismatch: abort
.cfi_endproc
.LFE2062:
.size _Z44__device_stub__Z17cu_hysteresis_lowPfS_S_fiiPfS_S_fii, .-_Z44__device_stub__Z17cu_hysteresis_lowPfS_S_fiiPfS_S_fii
# Host-side trampoline for the cu_hysteresis_low kernel; forwards to the
# argument-marshalling device stub above.
.globl cu_hysteresis_low
.type cu_hysteresis_low, @function
cu_hysteresis_low:
.LFB2063:
.cfi_startproc
endbr64
subq $8, %rsp                 # re-align stack to 16 bytes before the call
.cfi_def_cfa_offset 16
call _Z44__device_stub__Z17cu_hysteresis_lowPfS_S_fiiPfS_S_fii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size cu_hysteresis_low, .-cu_hysteresis_low
# Device stub for hysteresis_kernel(float*, float*, float, float, int, int).
# Same pattern as the other stubs: spill arguments, build the argument-pointer
# array, pop the launch configuration, then call cudaLaunchKernel.
.globl _Z43__device_stub__Z17hysteresis_kernelPfS_ffiiPfS_ffii
.type _Z43__device_stub__Z17hysteresis_kernelPfS_ffiiPfS_ffii, @function
_Z43__device_stub__Z17hysteresis_kernelPfS_ffiiPfS_ffii:
.LFB2064:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
# spill ABI registers: two pointers, two floats, two ints
movq %rdi, 24(%rsp)           # out_pixels
movq %rsi, 16(%rsp)           # in_pixels
movss %xmm0, 12(%rsp)         # t_low
movss %xmm1, 8(%rsp)          # t_high
movl %edx, 4(%rsp)            # img_height
movl %ecx, (%rsp)             # img_width
movq %fs:40, %rax             # install stack-protector canary
movq %rax, 152(%rsp)
xorl %eax, %eax
# argument-pointer array at 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movq %rsp, %rax
movq %rax, 136(%rsp)
# default 1x1x1 grid/block dims; overwritten by __cudaPopCallConfiguration
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx           # &stream
leaq 32(%rsp), %rdx           # &sharedMem
leaq 60(%rsp), %rsi           # &blockDim
leaq 48(%rsp), %rdi           # &gridDim
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L57                       # 0 => configuration present, do the launch
.L53:
movq 152(%rsp), %rax          # verify stack-protector canary
subq %fs:40, %rax
jne .L58
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L57:
.cfi_restore_state
pushq 40(%rsp)                # stream
.cfi_def_cfa_offset 184
pushq 40(%rsp)                # sharedMem
.cfi_def_cfa_offset 192
leaq 112(%rsp), %r9           # args array (offsets shifted by the pushes)
movq 76(%rsp), %rcx           # blockDim
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi           # gridDim
movl 72(%rsp), %edx
leaq hysteresis_kernel(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L53
.L58:
call __stack_chk_fail@PLT     # canary mismatch: abort
.cfi_endproc
.LFE2064:
.size _Z43__device_stub__Z17hysteresis_kernelPfS_ffiiPfS_ffii, .-_Z43__device_stub__Z17hysteresis_kernelPfS_ffiiPfS_ffii
# Host-side trampoline for the hysteresis_kernel kernel; forwards to the
# argument-marshalling device stub above.
.globl hysteresis_kernel
.type hysteresis_kernel, @function
hysteresis_kernel:
.LFB2065:
.cfi_startproc
endbr64
subq $8, %rsp                 # re-align stack to 16 bytes before the call
.cfi_def_cfa_offset 16
call _Z43__device_stub__Z17hysteresis_kernelPfS_ffiiPfS_ffii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2065:
.size hysteresis_kernel, .-hysteresis_kernel
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "hysteresis_kernel"
.LC1:
.string "cu_hysteresis_low"
.LC2:
.string "cu_hysteresis_high"
.LC3:
.string "cu_suppress_non_max"
.LC4:
.string "cu_magnitude"
.LC5:
.string "cu_compute_intensity_gradient"
.LC6:
.string "cu_apply_gaussian_filter"
.text
# Static initializer (run via .init_array before main) that registers the
# embedded fat binary with the CUDA runtime and binds each host trampoline
# symbol to its device kernel by name.  Registered kernels, in order:
# hysteresis_kernel, cu_hysteresis_low, cu_hysteresis_high,
# cu_suppress_non_max, cu_magnitude, cu_compute_intensity_gradient,
# cu_apply_gaussian_filter.
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2067:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx               # keep the module handle for all registrations
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
# register hysteresis_kernel (four NULL dim/size out-params pushed on stack)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d                # -1 thread limit: no __launch_bounds__
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq hysteresis_kernel(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
# register cu_hysteresis_low
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq cu_hysteresis_low(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
# register cu_hysteresis_high
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq cu_hysteresis_high(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
# register cu_suppress_non_max
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq cu_suppress_non_max(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
# register cu_magnitude
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq cu_magnitude(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
# register cu_compute_intensity_gradient
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq cu_compute_intensity_gradient(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
# register cu_apply_gaussian_filter
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq cu_apply_gaussian_filter(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
# unregister the module automatically at process exit
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2067:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | extern "C"{
#define RGB2GRAY_CONST_ARR_SIZE 3
#define STRONG_EDGE 255
#define NON_EDGE 0.0
#define KERNEL_SIZE 7
//*****************************************************************************************
// CUDA Gaussian Filter Implementation
//*****************************************************************************************
///
/// \brief Apply gaussian filter. This is the CUDA kernel for applying a gaussian blur to an image.
///
__global__ void cu_apply_gaussian_filter(float3 *in_pixels, float3 *out_pixels, int rows, int cols, double *in_kernel)
{
    // Apply a KERNEL_SIZE x KERNEL_SIZE gaussian blur to an RGB image.
    // One thread per pixel (1D launch over rows*cols); out-of-range taps are
    // skipped and the result is renormalized by the sum of in-bounds weights.
    //
    // BUGFIX: the four accumulators below were previously declared without an
    // initializer and then read via '+=', which is undefined behavior and
    // produced garbage output pixels.  They are now zero-initialized.
    //
    // Copy the filter kernel from global to shared memory cooperatively:
    // each thread loads a strided subset instead of every thread redundantly
    // copying all KERNEL_SIZE*KERNEL_SIZE entries (result is identical).
    __shared__ double kernel[KERNEL_SIZE][KERNEL_SIZE];
    for (int k = threadIdx.x; k < KERNEL_SIZE * KERNEL_SIZE; k += blockDim.x) {
        kernel[k / KERNEL_SIZE][k % KERNEL_SIZE] = in_kernel[k];
    }
    __syncthreads();
    // determine id of thread which corresponds to an individual pixel
    int pixNum = blockIdx.x * blockDim.x + threadIdx.x;
    if (pixNum >= 0 && pixNum < rows * cols) {
        double kernelSum = 0.0;      // sum of the weights actually applied
        double redPixelVal = 0.0;
        double greenPixelVal = 0.0;
        double bluePixelVal = 0.0;
        const int halo = (KERNEL_SIZE - 1) / 2;   // kernel radius
        // Apply kernel to each pixel of image
        for (int i = 0; i < KERNEL_SIZE; ++i) {
            for (int j = 0; j < KERNEL_SIZE; ++j) {
                // neighbor index in the flattened image
                int nbr = pixNum + (i - halo) * cols + j - halo;
                // check edge cases; if within bounds, apply filter
                if (nbr >= 0 && nbr <= rows * cols - 1
                    && ((pixNum % cols) + j - halo) >= 0
                    && ((pixNum % cols) + j - halo) <= (cols - 1)) {
                    redPixelVal   += kernel[i][j] * in_pixels[nbr].x;
                    greenPixelVal += kernel[i][j] * in_pixels[nbr].y;
                    bluePixelVal  += kernel[i][j] * in_pixels[nbr].z;
                    kernelSum     += kernel[i][j];
                }
            }
        }
        // update output image; kernelSum is never 0 because the center tap
        // (i == j == halo) is always in bounds for a valid pixNum
        out_pixels[pixNum].x = redPixelVal / kernelSum;
        out_pixels[pixNum].y = greenPixelVal / kernelSum;
        out_pixels[pixNum].z = bluePixelVal / kernelSum;
    }
}
//*****************************************************************************************
// CUDA Intensity Gradient Implementation
//*****************************************************************************************
///
/// \brief Compute gradient (first order derivative x and y). This is the CUDA kernel for taking the derivative of color contrasts in adjacent images.
///
// Per-pixel first-order intensity gradient via central differences, collapsing
// RGB to a single channel with the standard 0.2989/0.5870/0.1140 luma weights.
// NOTE(review): `offset` appears to be the row width in pixels and
// `parser_length` the row count (inferred from the `idx % offset` column
// tests) — confirm against the launch site.
__global__
void cu_compute_intensity_gradient(float3 *in_pixels, float *deltaX_channel, float *deltaY_channel, int parser_length, int offset)
{
// compute delta X ***************************
// deltaX = f(x+1) - f(x-1)
int idx = blockIdx.x * blockDim.x + threadIdx.x;
/* condition here skips first and last row */
if ((idx > offset) && (idx < (parser_length * offset) - offset))
{
float deltaXred = 0;
float deltaYred = 0;
float deltaXgreen = 0;
float deltaYgreen = 0;
float deltaXblue = 0;
float deltaYblue = 0;
/* first column: fall back to one-sided (forward) differences */
if((idx % offset) == 0)
{
// gradient at the first pixel of each line
// note: at the edge pix[idx-1] does NOT exist
deltaXred = (float)(in_pixels[idx+1].x - in_pixels[idx].x);
deltaXgreen = (float)(in_pixels[idx+1].y - in_pixels[idx].y);
deltaXblue = (float)(in_pixels[idx+1].z - in_pixels[idx].z);
// gradient at the first pixel of each line
// note: at the edge pix[idx-1] does NOT exist
deltaYred = (float)(in_pixels[idx+offset].x - in_pixels[idx].x);
deltaYgreen = (float)(in_pixels[idx+offset].y - in_pixels[idx].y);
deltaYblue = (float)(in_pixels[idx+offset].z - in_pixels[idx].z);
}
/* last column: one-sided (backward) differences */
else if((idx % offset) == (offset - 1))
{
deltaXred = (float)(in_pixels[idx].x - in_pixels[idx-1].x);
deltaXgreen = (float)(in_pixels[idx].y - in_pixels[idx-1].y);
deltaXblue = (float)(in_pixels[idx].z - in_pixels[idx-1].z);
deltaYred = (float)(in_pixels[idx].x - in_pixels[idx-offset].x);
deltaYgreen = (float)(in_pixels[idx].y - in_pixels[idx-offset].y);
deltaYblue = (float)(in_pixels[idx].z - in_pixels[idx-offset].z);
}
/* interior pixels: central differences in x (columns) and y (rows) */
else
{
deltaXred = (float)(in_pixels[idx+1].x - in_pixels[idx-1].x);
deltaXgreen = (float)(in_pixels[idx+1].y - in_pixels[idx-1].y);
deltaXblue = (float)(in_pixels[idx+1].z - in_pixels[idx-1].z);
deltaYred = (float)(in_pixels[idx+offset].x - in_pixels[idx-offset].x);
deltaYgreen = (float)(in_pixels[idx+offset].y - in_pixels[idx-offset].y);
deltaYblue = (float)(in_pixels[idx+offset].z - in_pixels[idx-offset].z);
}
// collapse the per-channel gradients to single luma gradients
deltaX_channel[idx] = (float)(0.2989 * deltaXred + 0.5870 * deltaXgreen + 0.1140 * deltaXblue);
deltaY_channel[idx] = (float)(0.2989 * deltaYred + 0.5870 * deltaYgreen + 0.1140 * deltaYblue);
}
}
//*****************************************************************************************
// CUDA Gradient Magnitude Implementation
//*****************************************************************************************
///
/// \brief Compute magnitude of gradient(deltaX & deltaY) per pixel.
///
__global__
void cu_magnitude(float *deltaX, float *deltaY, float *out_pixel, int parser_length, int offset)
{
    // Gradient magnitude per pixel: |G| = sqrt(dx^2 + dy^2) + 0.5 (rounding
    // bias).  One thread per pixel; parser_length * offset = total pixels.
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    const int total = parser_length * offset;
    if (idx < 0 || idx >= total)
        return;                         // thread beyond the image: nothing to do
    const double dx = (double)deltaX[idx];
    const double dy = (double)deltaY[idx];
    out_pixel[idx] = (float)(sqrt(dx * dx + dy * dy) + 0.5);
}
//*****************************************************************************************
// CUDA Non Maximal Suppression Implementation
//*****************************************************************************************
///
/// \brief Non Maximal Suppression
/// If the centre pixel is not greater than neighboured pixels in the direction,
/// then the center pixel is set to zero.
/// This process results in one pixel wide ridges.
///
// Non-maximal suppression: keep a pixel's gradient magnitude in `nms` only if
// it is a local maximum along its gradient direction; otherwise write 0.
// One thread per pixel.  NOTE(review): `offset` appears to be the image width
// and `parser_length` the image height (see the `idx % offset` column tests)
// — confirm against the launch site.
__global__ void cu_suppress_non_max(float *mag, float *deltaX, float *deltaY, float *nms, int parser_length, int offset)
{
const float SUPPRESSED = 0;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= 0 && idx < parser_length * offset)
{
float alpha;
float mag1, mag2;
// put zero all boundaries of image
// TOP edge line of the image
if((idx >= 0) && (idx <offset))
nms[idx] = 0;
// BOTTOM edge line of image
else if((idx >= (parser_length-1)*offset) && (idx < (offset * parser_length)))
nms[idx] = 0;
// LEFT & RIGHT edge line
else if(((idx % offset)==0) || ((idx % offset)==(offset - 1)))
{
nms[idx] = 0;
}
else // not the boundaries
{
// if magnitude = 0, no edge
if(mag[idx] == 0)
nms[idx] = SUPPRESSED;
else{
// alpha interpolates between the two neighbouring magnitudes straddling
// the true gradient direction; mag1/mag2 are the interpolated magnitudes
// on either side of the centre pixel along that direction
if(deltaX[idx] >= 0)
{
if(deltaY[idx] >= 0) // dx >= 0, dy >= 0
{
if((deltaX[idx] - deltaY[idx]) >= 0) // direction 1 (SEE, South-East-East)
{
alpha = (float)deltaY[idx] / deltaX[idx];
mag1 = (1-alpha)*mag[idx+1] + alpha*mag[idx+offset+1];
mag2 = (1-alpha)*mag[idx-1] + alpha*mag[idx-offset-1];
}
else // direction 2 (SSE)
{
alpha = (float)deltaX[idx] / deltaY[idx];
mag1 = (1-alpha)*mag[idx+offset] + alpha*mag[idx+offset+1];
mag2 = (1-alpha)*mag[idx-offset] + alpha*mag[idx-offset-1];
}
}
else // dx >= 0, dy < 0
{
if((deltaX[idx] + deltaY[idx]) >= 0) // direction 8 (NEE)
{
alpha = (float)-deltaY[idx] / deltaX[idx];
mag1 = (1-alpha)*mag[idx+1] + alpha*mag[idx-offset+1];
mag2 = (1-alpha)*mag[idx-1] + alpha*mag[idx+offset-1];
}
else // direction 7 (NNE)
{
alpha = (float)deltaX[idx] / -deltaY[idx];
mag1 = (1-alpha)*mag[idx+offset] + alpha*mag[idx+offset-1];
mag2 = (1-alpha)*mag[idx-offset] + alpha*mag[idx-offset+1];
}
}
}
else
{
if(deltaY[idx] >= 0) // dx < 0, dy >= 0
{
if((deltaX[idx] + deltaY[idx]) >= 0) // direction 3 (SSW)
{
alpha = (float)-deltaX[idx] / deltaY[idx];
mag1 = (1-alpha)*mag[idx+offset] + alpha*mag[idx+offset-1];
mag2 = (1-alpha)*mag[idx-offset] + alpha*mag[idx-offset+1];
}
else // direction 4 (SWW)
{
alpha = (float)deltaY[idx] / -deltaX[idx];
mag1 = (1-alpha)*mag[idx-1] + alpha*mag[idx+offset-1];
mag2 = (1-alpha)*mag[idx+1] + alpha*mag[idx-offset+1];
}
}
else // dx < 0, dy < 0
{
if((-deltaX[idx] + deltaY[idx]) >= 0) // direction 5 (NWW)
{
alpha = (float)deltaY[idx] / deltaX[idx];
mag1 = (1-alpha)*mag[idx-1] + alpha*mag[idx-offset-1];
mag2 = (1-alpha)*mag[idx+1] + alpha*mag[idx+offset+1];
}
else // direction 6 (NNW)
{
alpha = (float)deltaX[idx] / deltaY[idx];
mag1 = (1-alpha)*mag[idx-offset] + alpha*mag[idx-offset-1];
mag2 = (1-alpha)*mag[idx+offset] + alpha*mag[idx+offset+1];
}
}
}
// non-maximal suppression
// compare mag1, mag2 and mag[t]
// if mag[t] is smaller than one of the neighbours then suppress it
if((mag[idx] < mag1) || (mag[idx] < mag2))
nms[idx] = SUPPRESSED;
else
{
nms[idx] = mag[idx];
}
} // END OF ELSE (mag != 0)
} // END OF FOR(j)
} // END OF FOR(i)
}
//*****************************************************************************************
// CUDA Hysteresis Implementation
//*****************************************************************************************
///
/// \brief This is a helper function that runs on the GPU.
///
/// It checks if the eight immediate neighbors of a pixel at a given index are above
/// a low threshold, and if they are, sets them to strong edges. This effectively
/// connects the edges.
///
__device__
void trace_immed_neighbors(float *out_pixels, float *in_pixels,
int idx, float t_low, int img_width)
{
    // Promote to STRONG_EDGE every 8-connected neighbor of pixel `idx` whose
    // input value clears the low threshold and is not already 255 (i.e. not
    // already a strong edge).  Callers must guarantee idx is an interior
    // pixel so all eight neighbors exist.
    //
    // Relative offsets, visited in the same order as before:
    //   NW  N  NE
    //    W  .  E
    //   SW  S  SE
    const int step[8] = {
        -img_width - 1, -img_width, -img_width + 1,   // nw, n, ne
        -1,             +1,                           // w, e
        img_width - 1,  img_width,  img_width + 1     // sw, s, se
    };
    for (int k = 0; k < 8; ++k) {
        const unsigned nbr = (unsigned)(idx + step[k]);
        const float v = in_pixels[nbr];
        if (v >= t_low && v != 255.0) {
            out_pixels[nbr] = STRONG_EDGE;
        }
    }
}
///
/// \brief CUDA implementation of Canny hysteresis high thresholding.
///
/// This kernel is the first pass in the parallel hysteresis step.
/// It launches a thread for every pixel and checks if the value of that pixel
/// is above a high threshold. If it is, the thread marks it as a strong edge (set to 1)
/// in a pixel map and sets the value to the channel max. If it is not, the thread sets
/// the pixel map at the index to 0 and zeros the output buffer space at that index.
///
/// The output of this step is a mask of strong edges and an output buffer with white values
/// at the mask indices which are set.
///
__global__
void cu_hysteresis_high(float *out_pixels, float *in_pixels, float *strong_edge_mask,
float t_high, int img_height, int img_width)
{
    // First hysteresis pass: threshold every pixel against t_high.
    // Pixels strictly above the threshold are marked 1.0 in the mask and set
    // to STRONG_EDGE in the output; all others are zeroed in both buffers.
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= img_height * img_width)
        return;                         // thread beyond the image
    const bool strong = in_pixels[idx] > t_high;
    strong_edge_mask[idx] = strong ? 1.0 : 0.0;
    out_pixels[idx] = strong ? STRONG_EDGE : NON_EDGE;
}
///
/// \brief CUDA implementation of Canny hysteresis low thresholding.
///
/// This kernel is the second pass in the parallel hysteresis step.
/// It launches a thread for every pixel, but skips the first and last rows and columns.
/// For surviving threads, the pixel at the thread ID index is checked to see if it was
/// previously marked as a strong edge in the first pass. If it was, the thread checks
/// their eight immediate neighbors and connects them (marks them as strong edges)
/// if the neighbor is above the low threshold.
///
/// The output of this step is an output buffer with both "strong" and "connected" edges
/// set to whtie values. This is the final edge detected image.
///
__global__
void cu_hysteresis_low(float *out_pixels, float *in_pixels, float *strong_edge_mask,
float t_low, int img_height, int img_width)
{
    // Second hysteresis pass: for every interior pixel that the first pass
    // marked as a strong edge, connect its 8 neighbors that clear t_low.
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    // skip the first and last rows
    if (idx <= img_width || idx >= (img_height * img_width) - img_width)
        return;
    // skip the first and last columns
    const int col = idx % img_width;
    if (col <= 0 || col >= img_width - 1)
        return;
    // if this pixel was previously found to be a strong edge, trace outward
    if (1.0 == strong_edge_mask[idx]) {
        trace_immed_neighbors(out_pixels, in_pixels, idx, t_low, img_width);
    }
}
__global__ void hysteresis_kernel(float* out_pixels,float * in_pixels,float t_low,float t_high,int img_height,int img_width){
    // Fused hysteresis: for each interior pixel not already marked strong
    // (255), either promote it to 255 and connect its neighbors above t_low,
    // or zero it out.
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx <= img_width)
        return;                                   // first row
    if (idx >= (img_height * img_width) - img_width)
        return;                                   // last row
    const int col = idx % img_width;
    if (col <= 0 || col >= img_width - 1)
        return;                                   // first/last column
    const float pixel = in_pixels[idx];
    if (pixel == 255.0)
        return;                                   // already a strong edge
    if (pixel > t_high) {
        out_pixels[idx] = 255.0f;
        trace_immed_neighbors(out_pixels, in_pixels, idx, t_low, img_width);
    } else {
        out_pixels[idx] = 0.0f;
    }
}
} | #include <hip/hip_runtime.h>
extern "C"{
#define RGB2GRAY_CONST_ARR_SIZE 3
#define STRONG_EDGE 255
#define NON_EDGE 0.0
#define KERNEL_SIZE 7
//*****************************************************************************************
// CUDA Gaussian Filter Implementation
//*****************************************************************************************
///
/// \brief Apply gaussian filter. This is the CUDA kernel for applying a gaussian blur to an image.
///
__global__ void cu_apply_gaussian_filter(float3 *in_pixels, float3 *out_pixels, int rows, int cols, double *in_kernel)
{
    // Apply a KERNEL_SIZE x KERNEL_SIZE gaussian blur to an RGB image.
    // One thread per pixel (1D launch over rows*cols); out-of-range taps are
    // skipped and the result is renormalized by the sum of in-bounds weights.
    //
    // BUGFIX: the four accumulators below were previously declared without an
    // initializer and then read via '+=', which is undefined behavior and
    // produced garbage output pixels.  They are now zero-initialized.
    //
    // Copy the filter kernel from global to shared memory cooperatively:
    // each thread loads a strided subset instead of every thread redundantly
    // copying all KERNEL_SIZE*KERNEL_SIZE entries (result is identical).
    __shared__ double kernel[KERNEL_SIZE][KERNEL_SIZE];
    for (int k = threadIdx.x; k < KERNEL_SIZE * KERNEL_SIZE; k += blockDim.x) {
        kernel[k / KERNEL_SIZE][k % KERNEL_SIZE] = in_kernel[k];
    }
    __syncthreads();
    // determine id of thread which corresponds to an individual pixel
    int pixNum = blockIdx.x * blockDim.x + threadIdx.x;
    if (pixNum >= 0 && pixNum < rows * cols) {
        double kernelSum = 0.0;      // sum of the weights actually applied
        double redPixelVal = 0.0;
        double greenPixelVal = 0.0;
        double bluePixelVal = 0.0;
        const int halo = (KERNEL_SIZE - 1) / 2;   // kernel radius
        // Apply kernel to each pixel of image
        for (int i = 0; i < KERNEL_SIZE; ++i) {
            for (int j = 0; j < KERNEL_SIZE; ++j) {
                // neighbor index in the flattened image
                int nbr = pixNum + (i - halo) * cols + j - halo;
                // check edge cases; if within bounds, apply filter
                if (nbr >= 0 && nbr <= rows * cols - 1
                    && ((pixNum % cols) + j - halo) >= 0
                    && ((pixNum % cols) + j - halo) <= (cols - 1)) {
                    redPixelVal   += kernel[i][j] * in_pixels[nbr].x;
                    greenPixelVal += kernel[i][j] * in_pixels[nbr].y;
                    bluePixelVal  += kernel[i][j] * in_pixels[nbr].z;
                    kernelSum     += kernel[i][j];
                }
            }
        }
        // update output image; kernelSum is never 0 because the center tap
        // (i == j == halo) is always in bounds for a valid pixNum
        out_pixels[pixNum].x = redPixelVal / kernelSum;
        out_pixels[pixNum].y = greenPixelVal / kernelSum;
        out_pixels[pixNum].z = bluePixelVal / kernelSum;
    }
}
//*****************************************************************************************
// CUDA Intensity Gradient Implementation
//*****************************************************************************************
///
/// \brief Compute gradient (first order derivative x and y). This is the CUDA kernel for taking the derivative of color contrasts in adjacent images.
///
// Per-pixel first-order intensity gradient via central differences, collapsing
// RGB to a single channel with the standard 0.2989/0.5870/0.1140 luma weights.
// NOTE(review): `offset` appears to be the row width in pixels and
// `parser_length` the row count (inferred from the `idx % offset` column
// tests) — confirm against the launch site.
__global__
void cu_compute_intensity_gradient(float3 *in_pixels, float *deltaX_channel, float *deltaY_channel, int parser_length, int offset)
{
// compute delta X ***************************
// deltaX = f(x+1) - f(x-1)
int idx = blockIdx.x * blockDim.x + threadIdx.x;
/* condition here skips first and last row */
if ((idx > offset) && (idx < (parser_length * offset) - offset))
{
float deltaXred = 0;
float deltaYred = 0;
float deltaXgreen = 0;
float deltaYgreen = 0;
float deltaXblue = 0;
float deltaYblue = 0;
/* first column: fall back to one-sided (forward) differences */
if((idx % offset) == 0)
{
// gradient at the first pixel of each line
// note: at the edge pix[idx-1] does NOT exist
deltaXred = (float)(in_pixels[idx+1].x - in_pixels[idx].x);
deltaXgreen = (float)(in_pixels[idx+1].y - in_pixels[idx].y);
deltaXblue = (float)(in_pixels[idx+1].z - in_pixels[idx].z);
// gradient at the first pixel of each line
// note: at the edge pix[idx-1] does NOT exist
deltaYred = (float)(in_pixels[idx+offset].x - in_pixels[idx].x);
deltaYgreen = (float)(in_pixels[idx+offset].y - in_pixels[idx].y);
deltaYblue = (float)(in_pixels[idx+offset].z - in_pixels[idx].z);
}
/* last column: one-sided (backward) differences */
else if((idx % offset) == (offset - 1))
{
deltaXred = (float)(in_pixels[idx].x - in_pixels[idx-1].x);
deltaXgreen = (float)(in_pixels[idx].y - in_pixels[idx-1].y);
deltaXblue = (float)(in_pixels[idx].z - in_pixels[idx-1].z);
deltaYred = (float)(in_pixels[idx].x - in_pixels[idx-offset].x);
deltaYgreen = (float)(in_pixels[idx].y - in_pixels[idx-offset].y);
deltaYblue = (float)(in_pixels[idx].z - in_pixels[idx-offset].z);
}
/* interior pixels: central differences in x (columns) and y (rows) */
else
{
deltaXred = (float)(in_pixels[idx+1].x - in_pixels[idx-1].x);
deltaXgreen = (float)(in_pixels[idx+1].y - in_pixels[idx-1].y);
deltaXblue = (float)(in_pixels[idx+1].z - in_pixels[idx-1].z);
deltaYred = (float)(in_pixels[idx+offset].x - in_pixels[idx-offset].x);
deltaYgreen = (float)(in_pixels[idx+offset].y - in_pixels[idx-offset].y);
deltaYblue = (float)(in_pixels[idx+offset].z - in_pixels[idx-offset].z);
}
// collapse the per-channel gradients to single luma gradients
deltaX_channel[idx] = (float)(0.2989 * deltaXred + 0.5870 * deltaXgreen + 0.1140 * deltaXblue);
deltaY_channel[idx] = (float)(0.2989 * deltaYred + 0.5870 * deltaYgreen + 0.1140 * deltaYblue);
}
}
//*****************************************************************************************
// CUDA Gradient Magnitude Implementation
//*****************************************************************************************
///
/// \brief Compute magnitude of gradient(deltaX & deltaY) per pixel.
///
__global__
void cu_magnitude(float *deltaX, float *deltaY, float *out_pixel, int parser_length, int offset)
{
    // Gradient magnitude per pixel: |G| = sqrt(dx^2 + dy^2) + 0.5 (rounding
    // bias).  One thread per pixel; parser_length * offset = total pixels.
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    const int total = parser_length * offset;
    if (idx < 0 || idx >= total)
        return;                         // thread beyond the image: nothing to do
    const double dx = (double)deltaX[idx];
    const double dy = (double)deltaY[idx];
    out_pixel[idx] = (float)(sqrt(dx * dx + dy * dy) + 0.5);
}
//*****************************************************************************************
// CUDA Non Maximal Suppression Implementation
//*****************************************************************************************
///
/// \brief Non Maximal Suppression
/// If the centre pixel is not greater than neighboured pixels in the direction,
/// then the center pixel is set to zero.
/// This process results in one pixel wide ridges.
///
// Non-maximal suppression: keep a pixel's gradient magnitude in `nms` only if
// it is a local maximum along its gradient direction; otherwise write 0.
// One thread per pixel.  NOTE(review): `offset` appears to be the image width
// and `parser_length` the image height (see the `idx % offset` column tests)
// — confirm against the launch site.
__global__ void cu_suppress_non_max(float *mag, float *deltaX, float *deltaY, float *nms, int parser_length, int offset)
{
const float SUPPRESSED = 0;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= 0 && idx < parser_length * offset)
{
float alpha;
float mag1, mag2;
// put zero all boundaries of image
// TOP edge line of the image
if((idx >= 0) && (idx <offset))
nms[idx] = 0;
// BOTTOM edge line of image
else if((idx >= (parser_length-1)*offset) && (idx < (offset * parser_length)))
nms[idx] = 0;
// LEFT & RIGHT edge line
else if(((idx % offset)==0) || ((idx % offset)==(offset - 1)))
{
nms[idx] = 0;
}
else // not the boundaries
{
// if magnitude = 0, no edge
if(mag[idx] == 0)
nms[idx] = SUPPRESSED;
else{
// alpha interpolates between the two neighbouring magnitudes straddling
// the true gradient direction; mag1/mag2 are the interpolated magnitudes
// on either side of the centre pixel along that direction
if(deltaX[idx] >= 0)
{
if(deltaY[idx] >= 0) // dx >= 0, dy >= 0
{
if((deltaX[idx] - deltaY[idx]) >= 0) // direction 1 (SEE, South-East-East)
{
alpha = (float)deltaY[idx] / deltaX[idx];
mag1 = (1-alpha)*mag[idx+1] + alpha*mag[idx+offset+1];
mag2 = (1-alpha)*mag[idx-1] + alpha*mag[idx-offset-1];
}
else // direction 2 (SSE)
{
alpha = (float)deltaX[idx] / deltaY[idx];
mag1 = (1-alpha)*mag[idx+offset] + alpha*mag[idx+offset+1];
mag2 = (1-alpha)*mag[idx-offset] + alpha*mag[idx-offset-1];
}
}
else // dx >= 0, dy < 0
{
if((deltaX[idx] + deltaY[idx]) >= 0) // direction 8 (NEE)
{
alpha = (float)-deltaY[idx] / deltaX[idx];
mag1 = (1-alpha)*mag[idx+1] + alpha*mag[idx-offset+1];
mag2 = (1-alpha)*mag[idx-1] + alpha*mag[idx+offset-1];
}
else // direction 7 (NNE)
{
alpha = (float)deltaX[idx] / -deltaY[idx];
mag1 = (1-alpha)*mag[idx+offset] + alpha*mag[idx+offset-1];
mag2 = (1-alpha)*mag[idx-offset] + alpha*mag[idx-offset+1];
}
}
}
else
{
if(deltaY[idx] >= 0) // dx < 0, dy >= 0
{
if((deltaX[idx] + deltaY[idx]) >= 0) // direction 3 (SSW)
{
alpha = (float)-deltaX[idx] / deltaY[idx];
mag1 = (1-alpha)*mag[idx+offset] + alpha*mag[idx+offset-1];
mag2 = (1-alpha)*mag[idx-offset] + alpha*mag[idx-offset+1];
}
else // direction 4 (SWW)
{
alpha = (float)deltaY[idx] / -deltaX[idx];
mag1 = (1-alpha)*mag[idx-1] + alpha*mag[idx+offset-1];
mag2 = (1-alpha)*mag[idx+1] + alpha*mag[idx-offset+1];
}
}
else // dx < 0, dy < 0
{
if((-deltaX[idx] + deltaY[idx]) >= 0) // direction 5 (NWW)
{
alpha = (float)deltaY[idx] / deltaX[idx];
mag1 = (1-alpha)*mag[idx-1] + alpha*mag[idx-offset-1];
mag2 = (1-alpha)*mag[idx+1] + alpha*mag[idx+offset+1];
}
else // direction 6 (NNW)
{
alpha = (float)deltaX[idx] / deltaY[idx];
mag1 = (1-alpha)*mag[idx-offset] + alpha*mag[idx-offset-1];
mag2 = (1-alpha)*mag[idx+offset] + alpha*mag[idx+offset+1];
}
}
}
// non-maximal suppression
// compare mag1, mag2 and mag[t]
// if mag[t] is smaller than one of the neighbours then suppress it
if((mag[idx] < mag1) || (mag[idx] < mag2))
nms[idx] = SUPPRESSED;
else
{
nms[idx] = mag[idx];
}
} // END OF ELSE (mag != 0)
} // END OF FOR(j)
} // END OF FOR(i)
}
//*****************************************************************************************
// CUDA Hysteresis Implementation
//*****************************************************************************************
///
/// \brief This is a helper function that runs on the GPU.
///
/// It checks if the eight immediate neighbors of a pixel at a given index are above
/// a low threshold, and if they are, sets them to strong edges. This effectively
/// connects the edges.
///
__device__
void trace_immed_neighbors(float *out_pixels, float *in_pixels,
int idx, float t_low, int img_width)
{
    // Promote to STRONG_EDGE every 8-connected neighbor of pixel `idx` whose
    // input value clears the low threshold and is not already 255 (i.e. not
    // already a strong edge).  Callers must guarantee idx is an interior
    // pixel so all eight neighbors exist.
    //
    // Relative offsets, visited in the same order as before:
    //   NW  N  NE
    //    W  .  E
    //   SW  S  SE
    const int step[8] = {
        -img_width - 1, -img_width, -img_width + 1,   // nw, n, ne
        -1,             +1,                           // w, e
        img_width - 1,  img_width,  img_width + 1     // sw, s, se
    };
    for (int k = 0; k < 8; ++k) {
        const unsigned nbr = (unsigned)(idx + step[k]);
        const float v = in_pixels[nbr];
        if (v >= t_low && v != 255.0) {
            out_pixels[nbr] = STRONG_EDGE;
        }
    }
}
///
/// \brief CUDA implementation of Canny hysteresis high thresholding.
///
/// This kernel is the first pass in the parallel hysteresis step.
/// It launches a thread for every pixel and checks if the value of that pixel
/// is above a high threshold. If it is, the thread marks it as a strong edge (set to 1)
/// in a pixel map and sets the value to the channel max. If it is not, the thread sets
/// the pixel map at the index to 0 and zeros the output buffer space at that index.
///
/// The output of this step is a mask of strong edges and an output buffer with white values
/// at the mask indices which are set.
///
__global__
void cu_hysteresis_high(float *out_pixels, float *in_pixels, float *strong_edge_mask,
float t_high, int img_height, int img_width)
{
    // First hysteresis pass: threshold every pixel against t_high.
    // Pixels strictly above the threshold are marked 1.0 in the mask and set
    // to STRONG_EDGE in the output; all others are zeroed in both buffers.
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= img_height * img_width)
        return;                         // thread beyond the image
    const bool strong = in_pixels[idx] > t_high;
    strong_edge_mask[idx] = strong ? 1.0 : 0.0;
    out_pixels[idx] = strong ? STRONG_EDGE : NON_EDGE;
}
///
/// \brief CUDA implementation of Canny hysteresis low thresholding.
///
/// This kernel is the second pass in the parallel hysteresis step.
/// It launches a thread for every pixel, but skips the first and last rows and columns.
/// For surviving threads, the pixel at the thread ID index is checked to see if it was
/// previously marked as a strong edge in the first pass. If it was, the thread checks
/// their eight immediate neighbors and connects them (marks them as strong edges)
/// if the neighbor is above the low threshold.
///
/// The output of this step is an output buffer with both "strong" and "connected" edges
/// set to whtie values. This is the final edge detected image.
///
__global__
void cu_hysteresis_low(float *out_pixels, float *in_pixels, float *strong_edge_mask,
float t_low, int img_height, int img_width)
{
    // Second hysteresis pass: for every interior pixel that the first pass
    // marked as a strong edge, connect its 8 neighbors that clear t_low.
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    // skip the first and last rows
    if (idx <= img_width || idx >= (img_height * img_width) - img_width)
        return;
    // skip the first and last columns
    const int col = idx % img_width;
    if (col <= 0 || col >= img_width - 1)
        return;
    // if this pixel was previously found to be a strong edge, trace outward
    if (1.0 == strong_edge_mask[idx]) {
        trace_immed_neighbors(out_pixels, in_pixels, idx, t_low, img_width);
    }
}
__global__ void hysteresis_kernel(float* out_pixels,float * in_pixels,float t_low,float t_high,int img_height,int img_width){
    // Fused hysteresis: for each interior pixel not already marked strong
    // (255), either promote it to 255 and connect its neighbors above t_low,
    // or zero it out.
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx <= img_width)
        return;                                   // first row
    if (idx >= (img_height * img_width) - img_width)
        return;                                   // last row
    const int col = idx % img_width;
    if (col <= 0 || col >= img_width - 1)
        return;                                   // first/last column
    const float pixel = in_pixels[idx];
    if (pixel == 255.0)
        return;                                   // already a strong edge
    if (pixel > t_high) {
        out_pixels[idx] = 255.0f;
        trace_immed_neighbors(out_pixels, in_pixels, idx, t_low, img_width);
    } else {
        out_pixels[idx] = 0.0f;
    }
}
}
#include <hip/hip_runtime.h>
extern "C"{
#define RGB2GRAY_CONST_ARR_SIZE 3
#define STRONG_EDGE 255
#define NON_EDGE 0.0
#define KERNEL_SIZE 7
//*****************************************************************************************
// CUDA Gaussian Filter Implementation
//*****************************************************************************************
///
/// \brief Apply gaussian filter. This is the CUDA kernel for applying a gaussian blur to an image.
///
__global__ void cu_apply_gaussian_filter(float3 *in_pixels, float3 *out_pixels, int rows, int cols, double *in_kernel)
{
    ///
    /// \brief Apply a KERNEL_SIZE x KERNEL_SIZE gaussian blur to an RGB image.
    ///
    /// One thread per pixel (1D launch). Out-of-bounds taps are skipped and the
    /// result is renormalized by the kernel weight actually applied, so border
    /// pixels keep the correct scale.
    ///
    //copy kernel array from global memory to a shared array
    //NOTE(review): every thread redundantly copies all 49 entries; correct,
    //but the copy could be split across the block for speed.
    __shared__ double kernel[KERNEL_SIZE][KERNEL_SIZE];
    for (int i = 0; i < KERNEL_SIZE; ++i) {
        for (int j = 0; j < KERNEL_SIZE; ++j) {
            kernel[i][j] = in_kernel[i * KERNEL_SIZE + j];
        }
    }
    __syncthreads();
    //determine id of thread which corresponds to an individual pixel
    int pixNum = blockIdx.x * blockDim.x + threadIdx.x;
    if (pixNum >= 0 && pixNum < rows * cols) {
        //BUG FIX: these accumulators were previously read via '+=' without
        //being initialized (undefined behavior); they must start at zero.
        double kernelSum = 0.0;
        double redPixelVal = 0.0;
        double greenPixelVal = 0.0;
        double bluePixelVal = 0.0;
        //Apply Kernel to each pixel of image
        for (int i = 0; i < KERNEL_SIZE; ++i) {
            for (int j = 0; j < KERNEL_SIZE; ++j) {
                //check edge cases, if within bounds, apply filter
                if (((pixNum + ((i - ((KERNEL_SIZE - 1) / 2))*cols) + j - ((KERNEL_SIZE - 1) / 2)) >= 0)
                    && ((pixNum + ((i - ((KERNEL_SIZE - 1) / 2))*cols) + j - ((KERNEL_SIZE - 1) / 2)) <= rows*cols-1)
                    && (((pixNum % cols) + j - ((KERNEL_SIZE-1)/2)) >= 0)
                    && (((pixNum % cols) + j - ((KERNEL_SIZE-1)/2)) <= (cols-1))) {
                    redPixelVal   += kernel[i][j] * in_pixels[pixNum + ((i - ((KERNEL_SIZE - 1) / 2))*cols) + j - ((KERNEL_SIZE - 1) / 2)].x;
                    greenPixelVal += kernel[i][j] * in_pixels[pixNum + ((i - ((KERNEL_SIZE - 1) / 2))*cols) + j - ((KERNEL_SIZE - 1) / 2)].y;
                    bluePixelVal  += kernel[i][j] * in_pixels[pixNum + ((i - ((KERNEL_SIZE - 1) / 2))*cols) + j - ((KERNEL_SIZE - 1) / 2)].z;
                    kernelSum += kernel[i][j];
                }
            }
        }
        //update output image; the center tap is always in bounds, so for a
        //positive kernel kernelSum > 0 and the division is safe
        out_pixels[pixNum].x = redPixelVal / kernelSum;
        out_pixels[pixNum].y = greenPixelVal / kernelSum;
        out_pixels[pixNum].z = bluePixelVal / kernelSum;
    }
}
//*****************************************************************************************
// CUDA Intensity Gradient Implementation
//*****************************************************************************************
///
/// \brief Compute gradient (first order derivative x and y). This is the CUDA kernel for taking the derivative of color contrasts in adjacent images.
///
__global__
void cu_compute_intensity_gradient(float3 *in_pixels, float *deltaX_channel, float *deltaY_channel, int parser_length, int offset)
{
    ///
    /// \brief Per-pixel first-order intensity gradient (deltaX, deltaY).
    ///
    /// One thread per pixel (1D launch); `offset` is the row width in pixels
    /// and `parser_length` the number of rows. RGB differences are collapsed
    /// to a luminance gradient with the BT.601 weights 0.2989/0.5870/0.1140.
    /// Central differences are used in the interior, one-sided differences at
    /// the first/last column of each row.
    /// NOTE(review): `idx > offset` (strict) also excludes the first pixel of
    /// row 1, not just row 0 — confirm that is intended.
    ///
    // compute delta X ***************************
    // deltaX = f(x+1) - f(x-1)
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    /* condition here skips first and last row */
    if ((idx > offset) && (idx < (parser_length * offset) - offset))
    {
        float deltaXred = 0;
        float deltaYred = 0;
        float deltaXgreen = 0;
        float deltaYgreen = 0;
        float deltaXblue = 0;
        float deltaYblue = 0;
        /* first column: forward difference (pix[idx-1] does not exist) */
        if((idx % offset) == 0)
        {
            // gradient at the first pixel of each line
            // note: at the edge pix[idx-1] does NOT exist
            deltaXred = (float)(in_pixels[idx+1].x - in_pixels[idx].x);
            deltaXgreen = (float)(in_pixels[idx+1].y - in_pixels[idx].y);
            deltaXblue = (float)(in_pixels[idx+1].z - in_pixels[idx].z);
            // gradient at the first pixel of each line
            // note: at the edge pix[idx-1] does NOT exist
            deltaYred = (float)(in_pixels[idx+offset].x - in_pixels[idx].x);
            deltaYgreen = (float)(in_pixels[idx+offset].y - in_pixels[idx].y);
            deltaYblue = (float)(in_pixels[idx+offset].z - in_pixels[idx].z);
        }
        /* last column: backward difference (pix[idx+1] is the next row) */
        else if((idx % offset) == (offset - 1))
        {
            deltaXred = (float)(in_pixels[idx].x - in_pixels[idx-1].x);
            deltaXgreen = (float)(in_pixels[idx].y - in_pixels[idx-1].y);
            deltaXblue = (float)(in_pixels[idx].z - in_pixels[idx-1].z);
            deltaYred = (float)(in_pixels[idx].x - in_pixels[idx-offset].x);
            deltaYgreen = (float)(in_pixels[idx].y - in_pixels[idx-offset].y);
            deltaYblue = (float)(in_pixels[idx].z - in_pixels[idx-offset].z);
        }
        /* gradients where NOT edge: central differences in x and y */
        else
        {
            deltaXred = (float)(in_pixels[idx+1].x - in_pixels[idx-1].x);
            deltaXgreen = (float)(in_pixels[idx+1].y - in_pixels[idx-1].y);
            deltaXblue = (float)(in_pixels[idx+1].z - in_pixels[idx-1].z);
            deltaYred = (float)(in_pixels[idx+offset].x - in_pixels[idx-offset].x);
            deltaYgreen = (float)(in_pixels[idx+offset].y - in_pixels[idx-offset].y);
            deltaYblue = (float)(in_pixels[idx+offset].z - in_pixels[idx-offset].z);
        }
        /* collapse RGB gradients to a single luminance-weighted channel */
        deltaX_channel[idx] = (float)(0.2989 * deltaXred + 0.5870 * deltaXgreen + 0.1140 * deltaXblue);
        deltaY_channel[idx] = (float)(0.2989 * deltaYred + 0.5870 * deltaYgreen + 0.1140 * deltaYblue);
    }
}
//*****************************************************************************************
// CUDA Gradient Magnitude Implementation
//*****************************************************************************************
///
/// \brief Compute magnitude of gradient(deltaX & deltaY) per pixel.
///
__global__
void cu_magnitude(float *deltaX, float *deltaY, float *out_pixel, int parser_length, int offset)
{
    /// Per-pixel gradient magnitude: sqrt(dx^2 + dy^2) + 0.5, computed in
    /// double precision (the +0.5 biases a later truncation toward rounding).
    //Assigned a thread to each pixel
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < 0 || idx >= parser_length * offset) {
        return;  // thread outside the image
    }
    double dx = (double)deltaX[idx];
    double dy = (double)deltaY[idx];
    out_pixel[idx] = (float)(sqrt(dx * dx + dy * dy) + 0.5);
}
//*****************************************************************************************
// CUDA Non Maximal Suppression Implementation
//*****************************************************************************************
///
/// \brief Non Maximal Suppression
/// If the centre pixel is not greater than neighboured pixels in the direction,
/// then the center pixel is set to zero.
/// This process results in one pixel wide ridges.
///
__global__ void cu_suppress_non_max(float *mag, float *deltaX, float *deltaY, float *nms, int parser_length, int offset)
{
    ///
    /// \brief Non-maximal suppression along the gradient direction.
    ///
    /// One thread per pixel; `offset` is the row width, `parser_length` the row
    /// count. The gradient (deltaX, deltaY) is classified into one of eight
    /// direction sectors; the pixel's magnitude is compared against two values
    /// (mag1, mag2) linearly interpolated from its neighbors along that
    /// direction, and suppressed (set to 0) unless it is a local maximum.
    /// All image-border pixels are unconditionally zeroed.
    ///
    const float SUPPRESSED = 0;
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= 0 && idx < parser_length * offset)
    {
        float alpha;        // interpolation weight = tan/cot of gradient angle
        float mag1, mag2;   // interpolated magnitudes on either side of idx
        // put zero all boundaries of image
        // TOP edge line of the image
        if((idx >= 0) && (idx <offset))
            nms[idx] = 0;
        // BOTTOM edge line of image
        else if((idx >= (parser_length-1)*offset) && (idx < (offset * parser_length)))
            nms[idx] = 0;
        // LEFT & RIGHT edge line
        else if(((idx % offset)==0) || ((idx % offset)==(offset - 1)))
        {
            nms[idx] = 0;
        }
        else // not the boundaries
        {
            // if magnitude = 0, no edge
            if(mag[idx] == 0)
                nms[idx] = SUPPRESSED;
            else{
                if(deltaX[idx] >= 0)
                {
                    if(deltaY[idx] >= 0) // dx >= 0, dy >= 0
                    {
                        if((deltaX[idx] - deltaY[idx]) >= 0) // direction 1 (SEE, South-East-East)
                        {
                            alpha = (float)deltaY[idx] / deltaX[idx];
                            mag1 = (1-alpha)*mag[idx+1] + alpha*mag[idx+offset+1];
                            mag2 = (1-alpha)*mag[idx-1] + alpha*mag[idx-offset-1];
                        }
                        else // direction 2 (SSE)
                        {
                            alpha = (float)deltaX[idx] / deltaY[idx];
                            mag1 = (1-alpha)*mag[idx+offset] + alpha*mag[idx+offset+1];
                            mag2 = (1-alpha)*mag[idx-offset] + alpha*mag[idx-offset-1];
                        }
                    }
                    else // dx >= 0, dy < 0
                    {
                        if((deltaX[idx] + deltaY[idx]) >= 0) // direction 8 (NEE)
                        {
                            alpha = (float)-deltaY[idx] / deltaX[idx];
                            mag1 = (1-alpha)*mag[idx+1] + alpha*mag[idx-offset+1];
                            mag2 = (1-alpha)*mag[idx-1] + alpha*mag[idx+offset-1];
                        }
                        else // direction 7 (NNE)
                        {
                            alpha = (float)deltaX[idx] / -deltaY[idx];
                            mag1 = (1-alpha)*mag[idx+offset] + alpha*mag[idx+offset-1];
                            mag2 = (1-alpha)*mag[idx-offset] + alpha*mag[idx-offset+1];
                        }
                    }
                }
                else
                {
                    if(deltaY[idx] >= 0) // dx < 0, dy >= 0
                    {
                        if((deltaX[idx] + deltaY[idx]) >= 0) // direction 3 (SSW)
                        {
                            alpha = (float)-deltaX[idx] / deltaY[idx];
                            mag1 = (1-alpha)*mag[idx+offset] + alpha*mag[idx+offset-1];
                            mag2 = (1-alpha)*mag[idx-offset] + alpha*mag[idx-offset+1];
                        }
                        else // direction 4 (SWW)
                        {
                            alpha = (float)deltaY[idx] / -deltaX[idx];
                            mag1 = (1-alpha)*mag[idx-1] + alpha*mag[idx+offset-1];
                            mag2 = (1-alpha)*mag[idx+1] + alpha*mag[idx-offset+1];
                        }
                    }
                    else // dx < 0, dy < 0
                    {
                        if((-deltaX[idx] + deltaY[idx]) >= 0) // direction 5 (NWW)
                        {
                            alpha = (float)deltaY[idx] / deltaX[idx];
                            mag1 = (1-alpha)*mag[idx-1] + alpha*mag[idx-offset-1];
                            mag2 = (1-alpha)*mag[idx+1] + alpha*mag[idx+offset+1];
                        }
                        else // direction 6 (NNW)
                        {
                            alpha = (float)deltaX[idx] / deltaY[idx];
                            mag1 = (1-alpha)*mag[idx-offset] + alpha*mag[idx-offset-1];
                            mag2 = (1-alpha)*mag[idx+offset] + alpha*mag[idx+offset+1];
                        }
                    }
                }
                // non-maximal suppression
                // compare mag1, mag2 and mag[t]
                // if mag[t] is smaller than one of the neighbours then suppress it
                if((mag[idx] < mag1) || (mag[idx] < mag2))
                    nms[idx] = SUPPRESSED;
                else
                {
                    nms[idx] = mag[idx];
                }
            } // END OF ELSE (mag != 0)
        } // END OF FOR(j)
    } // END OF FOR(i)
}
//*****************************************************************************************
// CUDA Hysteresis Implementation
//*****************************************************************************************
///
/// \brief This is a helper function that runs on the GPU.
///
/// It checks if the eight immediate neighbors of a pixel at a given index are above
/// a low threshold, and if they are, sets them to strong edges. This effectively
/// connects the edges.
///
__device__
void trace_immed_neighbors(float *out_pixels, float *in_pixels,
                           int idx, float t_low, int img_width)
{
    /// Promote each of the eight immediate neighbors of `idx` to a strong edge
    /// when its value is >= t_low and not already at the channel max (255).
    /// The caller guarantees `idx` is an interior pixel, so every neighbor
    /// index is in range. Neighbor order: nw, n, ne, w, e, sw, s, se.
    const int neighbor_offsets[8] = {
        -img_width - 1, -img_width, -img_width + 1,  /* nw, n, ne */
        -1,                          1,              /* w,      e */
        img_width - 1,  img_width,  img_width + 1    /* sw, s, se */
    };
    for (int k = 0; k < 8; ++k) {
        unsigned nbr = idx + neighbor_offsets[k];
        float v = in_pixels[nbr];
        if (v >= t_low && v != 255.0) {
            out_pixels[nbr] = STRONG_EDGE;
        }
    }
}
///
/// \brief CUDA implementation of Canny hysteresis high thresholding.
///
/// This kernel is the first pass in the parallel hysteresis step.
/// It launches a thread for every pixel and checks if the value of that pixel
/// is above a high threshold. If it is, the thread marks it as a strong edge (set to 1)
/// in a pixel map and sets the value to the channel max. If it is not, the thread sets
/// the pixel map at the index to 0 and zeros the output buffer space at that index.
///
/// The output of this step is a mask of strong edges and an output buffer with white values
/// at the mask indices which are set.
///
__global__
void cu_hysteresis_high(float *out_pixels, float *in_pixels, float *strong_edge_mask,
                        float t_high, int img_height, int img_width)
{
    /// First hysteresis pass: threshold every pixel against t_high, producing
    /// a strong-edge mask (1/0) and a white/black output image.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < img_height * img_width) {
        /* apply high threshold */
        if (in_pixels[idx] > t_high) {
            /* strong edge: flag it and write the channel max */
            strong_edge_mask[idx] = 1.0f;
            out_pixels[idx] = (float)STRONG_EDGE;
        } else {
            /* not an edge: clear both mask and output */
            strong_edge_mask[idx] = 0.0f;
            out_pixels[idx] = (float)NON_EDGE;
        }
    }
}
///
/// \brief CUDA implementation of Canny hysteresis low thresholding.
///
/// This kernel is the second pass in the parallel hysteresis step.
/// It launches a thread for every pixel, but skips the first and last rows and columns.
/// For surviving threads, the pixel at the thread ID index is checked to see if it was
/// previously marked as a strong edge in the first pass. If it was, the thread checks
/// their eight immediate neighbors and connects them (marks them as strong edges)
/// if the neighbor is above the low threshold.
///
/// The output of this step is an output buffer with both "strong" and "connected" edges
/// set to whtie values. This is the final edge detected image.
///
__global__
void cu_hysteresis_low(float *out_pixels, float *in_pixels, float *strong_edge_mask,
                       float t_low, int img_height, int img_width)
{
    /// Second hysteresis pass: interior pixels flagged as strong edges in the
    /// first pass connect their neighbors that clear t_low.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    /* row guard: skip first row, last row, and out-of-range threads */
    if ((idx <= img_width) || (idx >= (img_height * img_width) - img_width)) {
        return;
    }
    /* column guard: skip first and last column */
    int col = idx % img_width;
    if ((col <= 0) || (col >= img_width - 1)) {
        return;
    }
    if (1.0 == strong_edge_mask[idx]) { /* previously found to be a strong edge */
        trace_immed_neighbors(out_pixels, in_pixels, idx, t_low, img_width);
    }
}
__global__ void hysteresis_kernel(float* out_pixels,float * in_pixels,float t_low,float t_high,int img_height,int img_width){
    /// Single-pass hysteresis over interior pixels: values above t_high become
    /// white (255) and promote qualifying neighbors; other non-255 values are
    /// zeroed. Pixels already at 255 are left untouched.
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if ((tid > img_width) &&
        (tid < (img_height * img_width) - img_width)) {   /* interior rows only */
        const int column = tid % img_width;
        if ((column > 0) && (column < img_width - 1)) {   /* interior cols only */
            const float val = in_pixels[tid];
            if (val != 255.0) {
                if (val > t_high) {
                    out_pixels[tid] = 255.0f;
                    trace_immed_neighbors(out_pixels, in_pixels, tid, t_low, img_width);
                } else {
                    out_pixels[tid] = 0.0f;
                }
            }
        }
    }
}
}
	.text
.file "27-canny.hip"
.globl __device_stub__cu_apply_gaussian_filter # -- Begin function __device_stub__cu_apply_gaussian_filter
.p2align 4, 0x90
.type __device_stub__cu_apply_gaussian_filter,@function
__device_stub__cu_apply_gaussian_filter: # @__device_stub__cu_apply_gaussian_filter
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 4(%rsp)
movl %ecx, (%rsp)
movq %r8, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 4(%rsp), %rax
movq %rax, 96(%rsp)
movq %rsp, %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $cu_apply_gaussian_filter, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size __device_stub__cu_apply_gaussian_filter, .Lfunc_end0-__device_stub__cu_apply_gaussian_filter
.cfi_endproc
# -- End function
.globl __device_stub__cu_compute_intensity_gradient # -- Begin function __device_stub__cu_compute_intensity_gradient
.p2align 4, 0x90
.type __device_stub__cu_compute_intensity_gradient,@function
__device_stub__cu_compute_intensity_gradient: # @__device_stub__cu_compute_intensity_gradient
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
movq %rsp, %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $cu_compute_intensity_gradient, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size __device_stub__cu_compute_intensity_gradient, .Lfunc_end1-__device_stub__cu_compute_intensity_gradient
.cfi_endproc
# -- End function
.globl __device_stub__cu_magnitude # -- Begin function __device_stub__cu_magnitude
.p2align 4, 0x90
.type __device_stub__cu_magnitude,@function
__device_stub__cu_magnitude: # @__device_stub__cu_magnitude
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
movq %rsp, %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $cu_magnitude, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size __device_stub__cu_magnitude, .Lfunc_end2-__device_stub__cu_magnitude
.cfi_endproc
# -- End function
.globl __device_stub__cu_suppress_non_max # -- Begin function __device_stub__cu_suppress_non_max
.p2align 4, 0x90
.type __device_stub__cu_suppress_non_max,@function
__device_stub__cu_suppress_non_max: # @__device_stub__cu_suppress_non_max
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $cu_suppress_non_max, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end3:
.size __device_stub__cu_suppress_non_max, .Lfunc_end3-__device_stub__cu_suppress_non_max
.cfi_endproc
# -- End function
.globl __device_stub__cu_hysteresis_high # -- Begin function __device_stub__cu_hysteresis_high
.p2align 4, 0x90
.type __device_stub__cu_hysteresis_high,@function
__device_stub__cu_hysteresis_high: # @__device_stub__cu_hysteresis_high
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movss %xmm0, 20(%rsp)
movl %ecx, 16(%rsp)
movl %r8d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $cu_hysteresis_high, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end4:
.size __device_stub__cu_hysteresis_high, .Lfunc_end4-__device_stub__cu_hysteresis_high
.cfi_endproc
# -- End function
.globl __device_stub__cu_hysteresis_low # -- Begin function __device_stub__cu_hysteresis_low
.p2align 4, 0x90
.type __device_stub__cu_hysteresis_low,@function
__device_stub__cu_hysteresis_low: # @__device_stub__cu_hysteresis_low
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movss %xmm0, 20(%rsp)
movl %ecx, 16(%rsp)
movl %r8d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $cu_hysteresis_low, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end5:
.size __device_stub__cu_hysteresis_low, .Lfunc_end5-__device_stub__cu_hysteresis_low
.cfi_endproc
# -- End function
.globl __device_stub__hysteresis_kernel # -- Begin function __device_stub__hysteresis_kernel
.p2align 4, 0x90
.type __device_stub__hysteresis_kernel,@function
__device_stub__hysteresis_kernel: # @__device_stub__hysteresis_kernel
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movss %xmm0, 12(%rsp)
movss %xmm1, 8(%rsp)
movl %edx, 4(%rsp)
movl %ecx, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $hysteresis_kernel, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end6:
.size __device_stub__hysteresis_kernel, .Lfunc_end6-__device_stub__hysteresis_kernel
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB7_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB7_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $cu_apply_gaussian_filter, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $cu_compute_intensity_gradient, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $cu_magnitude, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $cu_suppress_non_max, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $cu_hysteresis_high, %esi
movl $.L__unnamed_5, %edx
movl $.L__unnamed_5, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $cu_hysteresis_low, %esi
movl $.L__unnamed_6, %edx
movl $.L__unnamed_6, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $hysteresis_kernel, %esi
movl $.L__unnamed_7, %edx
movl $.L__unnamed_7, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end7:
.size __hip_module_ctor, .Lfunc_end7-__hip_module_ctor
.cfi_endproc
# -- End function
	.p2align	4, 0x90                         # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
# Module destructor registered via atexit by __hip_module_ctor: if a fat-binary
# handle was registered, unregister it with the HIP runtime and clear the slot.
__hip_module_dtor:                      # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi
	je	.LBB8_2                         # handle is null -> nothing to do
# %bb.1:
	pushq	%rax                            # keep stack 16-byte aligned for call
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)   # mark handle as released
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB8_2:
	retq
.Lfunc_end8:
	.size	__hip_module_dtor, .Lfunc_end8-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
.type cu_apply_gaussian_filter,@object # @cu_apply_gaussian_filter
.section .rodata,"a",@progbits
.globl cu_apply_gaussian_filter
.p2align 3, 0x0
cu_apply_gaussian_filter:
.quad __device_stub__cu_apply_gaussian_filter
.size cu_apply_gaussian_filter, 8
.type cu_compute_intensity_gradient,@object # @cu_compute_intensity_gradient
.globl cu_compute_intensity_gradient
.p2align 3, 0x0
cu_compute_intensity_gradient:
.quad __device_stub__cu_compute_intensity_gradient
.size cu_compute_intensity_gradient, 8
.type cu_magnitude,@object # @cu_magnitude
.globl cu_magnitude
.p2align 3, 0x0
cu_magnitude:
.quad __device_stub__cu_magnitude
.size cu_magnitude, 8
.type cu_suppress_non_max,@object # @cu_suppress_non_max
.globl cu_suppress_non_max
.p2align 3, 0x0
cu_suppress_non_max:
.quad __device_stub__cu_suppress_non_max
.size cu_suppress_non_max, 8
.type cu_hysteresis_high,@object # @cu_hysteresis_high
.globl cu_hysteresis_high
.p2align 3, 0x0
cu_hysteresis_high:
.quad __device_stub__cu_hysteresis_high
.size cu_hysteresis_high, 8
.type cu_hysteresis_low,@object # @cu_hysteresis_low
.globl cu_hysteresis_low
.p2align 3, 0x0
cu_hysteresis_low:
.quad __device_stub__cu_hysteresis_low
.size cu_hysteresis_low, 8
.type hysteresis_kernel,@object # @hysteresis_kernel
.globl hysteresis_kernel
.p2align 3, 0x0
hysteresis_kernel:
.quad __device_stub__hysteresis_kernel
.size hysteresis_kernel, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "cu_apply_gaussian_filter"
.size .L__unnamed_1, 25
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "cu_compute_intensity_gradient"
.size .L__unnamed_2, 30
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "cu_magnitude"
.size .L__unnamed_3, 13
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "cu_suppress_non_max"
.size .L__unnamed_4, 20
.type .L__unnamed_5,@object # @4
.L__unnamed_5:
.asciz "cu_hysteresis_high"
.size .L__unnamed_5, 19
.type .L__unnamed_6,@object # @5
.L__unnamed_6:
.asciz "cu_hysteresis_low"
.size .L__unnamed_6, 18
.type .L__unnamed_7,@object # @6
.L__unnamed_7:
.asciz "hysteresis_kernel"
.size .L__unnamed_7, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__cu_apply_gaussian_filter
.addrsig_sym __device_stub__cu_compute_intensity_gradient
.addrsig_sym __device_stub__cu_magnitude
.addrsig_sym __device_stub__cu_suppress_non_max
.addrsig_sym __device_stub__cu_hysteresis_high
.addrsig_sym __device_stub__cu_hysteresis_low
.addrsig_sym __device_stub__hysteresis_kernel
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym cu_apply_gaussian_filter
.addrsig_sym cu_compute_intensity_gradient
.addrsig_sym cu_magnitude
.addrsig_sym cu_suppress_non_max
.addrsig_sym cu_hysteresis_high
.addrsig_sym cu_hysteresis_low
.addrsig_sym hysteresis_kernel
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
	.file	"tmpxft_000fbdaf_00000000-6_27-canny.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl trace_immed_neighbors
.type trace_immed_neighbors, @function
trace_immed_neighbors:
.LFB2027:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2027:
.size trace_immed_neighbors, .-trace_immed_neighbors
.globl _Z57__device_stub__Z24cu_apply_gaussian_filterP6float3S0_iiPdP6float3S0_iiPd
.type _Z57__device_stub__Z24cu_apply_gaussian_filterP6float3S0_iiPdP6float3S0_iiPd, @function
_Z57__device_stub__Z24cu_apply_gaussian_filterP6float3S0_iiPdP6float3S0_iiPd:
.LFB2052:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %r8, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L9
.L5:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L10
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq cu_apply_gaussian_filter(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L5
.L10:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z57__device_stub__Z24cu_apply_gaussian_filterP6float3S0_iiPdP6float3S0_iiPd, .-_Z57__device_stub__Z24cu_apply_gaussian_filterP6float3S0_iiPdP6float3S0_iiPd
.globl cu_apply_gaussian_filter
.type cu_apply_gaussian_filter, @function
cu_apply_gaussian_filter:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z57__device_stub__Z24cu_apply_gaussian_filterP6float3S0_iiPdP6float3S0_iiPd
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size cu_apply_gaussian_filter, .-cu_apply_gaussian_filter
.globl _Z62__device_stub__Z29cu_compute_intensity_gradientP6float3PfS1_iiP6float3PfS1_ii
.type _Z62__device_stub__Z29cu_compute_intensity_gradientP6float3PfS1_iiP6float3PfS1_ii, @function
_Z62__device_stub__Z29cu_compute_intensity_gradientP6float3PfS1_iiP6float3PfS1_ii:
.LFB2054:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L17
.L13:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L18
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq cu_compute_intensity_gradient(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L13
.L18:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2054:
.size _Z62__device_stub__Z29cu_compute_intensity_gradientP6float3PfS1_iiP6float3PfS1_ii, .-_Z62__device_stub__Z29cu_compute_intensity_gradientP6float3PfS1_iiP6float3PfS1_ii
.globl cu_compute_intensity_gradient
.type cu_compute_intensity_gradient, @function
cu_compute_intensity_gradient:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z62__device_stub__Z29cu_compute_intensity_gradientP6float3PfS1_iiP6float3PfS1_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size cu_compute_intensity_gradient, .-cu_compute_intensity_gradient
.globl _Z38__device_stub__Z12cu_magnitudePfS_S_iiPfS_S_ii
.type _Z38__device_stub__Z12cu_magnitudePfS_S_iiPfS_S_ii, @function
_Z38__device_stub__Z12cu_magnitudePfS_S_iiPfS_S_ii:
.LFB2056:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L25
.L21:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L26
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq cu_magnitude(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L21
.L26:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2056:
.size _Z38__device_stub__Z12cu_magnitudePfS_S_iiPfS_S_ii, .-_Z38__device_stub__Z12cu_magnitudePfS_S_iiPfS_S_ii
.globl cu_magnitude
.type cu_magnitude, @function
cu_magnitude:
.LFB2057:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z12cu_magnitudePfS_S_iiPfS_S_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size cu_magnitude, .-cu_magnitude
.globl _Z47__device_stub__Z19cu_suppress_non_maxPfS_S_S_iiPfS_S_S_ii
.type _Z47__device_stub__Z19cu_suppress_non_maxPfS_S_S_iiPfS_S_S_ii, @function
_Z47__device_stub__Z19cu_suppress_non_maxPfS_S_S_iiPfS_S_S_ii:
.LFB2058:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L33
.L29:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L34
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L33:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq cu_suppress_non_max(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L29
.L34:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size _Z47__device_stub__Z19cu_suppress_non_maxPfS_S_S_iiPfS_S_S_ii, .-_Z47__device_stub__Z19cu_suppress_non_maxPfS_S_S_iiPfS_S_S_ii
.globl cu_suppress_non_max
.type cu_suppress_non_max, @function
cu_suppress_non_max:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z47__device_stub__Z19cu_suppress_non_maxPfS_S_S_iiPfS_S_S_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size cu_suppress_non_max, .-cu_suppress_non_max
.globl _Z45__device_stub__Z18cu_hysteresis_highPfS_S_fiiPfS_S_fii
.type _Z45__device_stub__Z18cu_hysteresis_highPfS_S_fiiPfS_S_fii, @function
_Z45__device_stub__Z18cu_hysteresis_highPfS_S_fiiPfS_S_fii:
.LFB2060:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movss %xmm0, 20(%rsp)
movl %ecx, 16(%rsp)
movl %r8d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L41
.L37:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L42
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L41:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq cu_hysteresis_high(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L37
.L42:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2060:
.size _Z45__device_stub__Z18cu_hysteresis_highPfS_S_fiiPfS_S_fii, .-_Z45__device_stub__Z18cu_hysteresis_highPfS_S_fiiPfS_S_fii
.globl cu_hysteresis_high
.type cu_hysteresis_high, @function
cu_hysteresis_high:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z45__device_stub__Z18cu_hysteresis_highPfS_S_fiiPfS_S_fii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size cu_hysteresis_high, .-cu_hysteresis_high
.globl _Z44__device_stub__Z17cu_hysteresis_lowPfS_S_fiiPfS_S_fii
.type _Z44__device_stub__Z17cu_hysteresis_lowPfS_S_fiiPfS_S_fii, @function
_Z44__device_stub__Z17cu_hysteresis_lowPfS_S_fiiPfS_S_fii:
.LFB2062:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movss %xmm0, 20(%rsp)
movl %ecx, 16(%rsp)
movl %r8d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L49
.L45:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L50
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L49:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq cu_hysteresis_low(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L45
.L50:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2062:
.size _Z44__device_stub__Z17cu_hysteresis_lowPfS_S_fiiPfS_S_fii, .-_Z44__device_stub__Z17cu_hysteresis_lowPfS_S_fiiPfS_S_fii
.globl cu_hysteresis_low
.type cu_hysteresis_low, @function
cu_hysteresis_low:
.LFB2063:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z44__device_stub__Z17cu_hysteresis_lowPfS_S_fiiPfS_S_fii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size cu_hysteresis_low, .-cu_hysteresis_low
.globl _Z43__device_stub__Z17hysteresis_kernelPfS_ffiiPfS_ffii
.type _Z43__device_stub__Z17hysteresis_kernelPfS_ffiiPfS_ffii, @function
_Z43__device_stub__Z17hysteresis_kernelPfS_ffiiPfS_ffii:
.LFB2064:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movss %xmm0, 12(%rsp)
movss %xmm1, 8(%rsp)
movl %edx, 4(%rsp)
movl %ecx, (%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movq %rsp, %rax
movq %rax, 136(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L57
.L53:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L58
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L57:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 184
pushq 40(%rsp)
.cfi_def_cfa_offset 192
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq hysteresis_kernel(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L53
.L58:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2064:
.size _Z43__device_stub__Z17hysteresis_kernelPfS_ffiiPfS_ffii, .-_Z43__device_stub__Z17hysteresis_kernelPfS_ffiiPfS_ffii
.globl hysteresis_kernel
.type hysteresis_kernel, @function
hysteresis_kernel:
.LFB2065:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z43__device_stub__Z17hysteresis_kernelPfS_ffiiPfS_ffii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2065:
.size hysteresis_kernel, .-hysteresis_kernel
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "hysteresis_kernel"
.LC1:
.string "cu_hysteresis_low"
.LC2:
.string "cu_hysteresis_high"
.LC3:
.string "cu_suppress_non_max"
.LC4:
.string "cu_magnitude"
.LC5:
.string "cu_compute_intensity_gradient"
.LC6:
.string "cu_apply_gaussian_filter"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2067:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq hysteresis_kernel(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq cu_hysteresis_low(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq cu_hysteresis_high(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq cu_suppress_non_max(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq cu_magnitude(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq cu_compute_intensity_gradient(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq cu_apply_gaussian_filter(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2067:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "27-canny.hip"
.globl __device_stub__cu_apply_gaussian_filter # -- Begin function __device_stub__cu_apply_gaussian_filter
.p2align 4, 0x90
.type __device_stub__cu_apply_gaussian_filter,@function
__device_stub__cu_apply_gaussian_filter: # @__device_stub__cu_apply_gaussian_filter
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 4(%rsp)
movl %ecx, (%rsp)
movq %r8, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 4(%rsp), %rax
movq %rax, 96(%rsp)
movq %rsp, %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $cu_apply_gaussian_filter, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size __device_stub__cu_apply_gaussian_filter, .Lfunc_end0-__device_stub__cu_apply_gaussian_filter
.cfi_endproc
# -- End function
.globl __device_stub__cu_compute_intensity_gradient # -- Begin function __device_stub__cu_compute_intensity_gradient
.p2align 4, 0x90
.type __device_stub__cu_compute_intensity_gradient,@function
__device_stub__cu_compute_intensity_gradient: # @__device_stub__cu_compute_intensity_gradient
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
movq %rsp, %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $cu_compute_intensity_gradient, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size __device_stub__cu_compute_intensity_gradient, .Lfunc_end1-__device_stub__cu_compute_intensity_gradient
.cfi_endproc
# -- End function
.globl __device_stub__cu_magnitude # -- Begin function __device_stub__cu_magnitude
.p2align 4, 0x90
.type __device_stub__cu_magnitude,@function
__device_stub__cu_magnitude: # @__device_stub__cu_magnitude
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
movq %rsp, %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $cu_magnitude, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size __device_stub__cu_magnitude, .Lfunc_end2-__device_stub__cu_magnitude
.cfi_endproc
# -- End function
.globl __device_stub__cu_suppress_non_max # -- Begin function __device_stub__cu_suppress_non_max
.p2align 4, 0x90
.type __device_stub__cu_suppress_non_max,@function
__device_stub__cu_suppress_non_max: # @__device_stub__cu_suppress_non_max
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $cu_suppress_non_max, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end3:
.size __device_stub__cu_suppress_non_max, .Lfunc_end3-__device_stub__cu_suppress_non_max
.cfi_endproc
# -- End function
.globl __device_stub__cu_hysteresis_high # -- Begin function __device_stub__cu_hysteresis_high
.p2align 4, 0x90
.type __device_stub__cu_hysteresis_high,@function
__device_stub__cu_hysteresis_high: # @__device_stub__cu_hysteresis_high
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movss %xmm0, 20(%rsp)
movl %ecx, 16(%rsp)
movl %r8d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $cu_hysteresis_high, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end4:
.size __device_stub__cu_hysteresis_high, .Lfunc_end4-__device_stub__cu_hysteresis_high
.cfi_endproc
# -- End function
.globl __device_stub__cu_hysteresis_low # -- Begin function __device_stub__cu_hysteresis_low
.p2align 4, 0x90
.type __device_stub__cu_hysteresis_low,@function
__device_stub__cu_hysteresis_low: # @__device_stub__cu_hysteresis_low
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movss %xmm0, 20(%rsp)
movl %ecx, 16(%rsp)
movl %r8d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $cu_hysteresis_low, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end5:
.size __device_stub__cu_hysteresis_low, .Lfunc_end5-__device_stub__cu_hysteresis_low
.cfi_endproc
# -- End function
.globl __device_stub__hysteresis_kernel # -- Begin function __device_stub__hysteresis_kernel
.p2align 4, 0x90
.type __device_stub__hysteresis_kernel,@function
__device_stub__hysteresis_kernel: # @__device_stub__hysteresis_kernel
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movss %xmm0, 12(%rsp)
movss %xmm1, 8(%rsp)
movl %edx, 4(%rsp)
movl %ecx, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $hysteresis_kernel, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end6:
.size __device_stub__hysteresis_kernel, .Lfunc_end6-__device_stub__hysteresis_kernel
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB7_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB7_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $cu_apply_gaussian_filter, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $cu_compute_intensity_gradient, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $cu_magnitude, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $cu_suppress_non_max, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $cu_hysteresis_high, %esi
movl $.L__unnamed_5, %edx
movl $.L__unnamed_5, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $cu_hysteresis_low, %esi
movl $.L__unnamed_6, %edx
movl $.L__unnamed_6, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $hysteresis_kernel, %esi
movl $.L__unnamed_7, %edx
movl $.L__unnamed_7, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end7:
.size __hip_module_ctor, .Lfunc_end7-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB8_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB8_2:
retq
.Lfunc_end8:
.size __hip_module_dtor, .Lfunc_end8-__hip_module_dtor
.cfi_endproc
# -- End function
.type cu_apply_gaussian_filter,@object # @cu_apply_gaussian_filter
.section .rodata,"a",@progbits
.globl cu_apply_gaussian_filter
.p2align 3, 0x0
cu_apply_gaussian_filter:
.quad __device_stub__cu_apply_gaussian_filter
.size cu_apply_gaussian_filter, 8
.type cu_compute_intensity_gradient,@object # @cu_compute_intensity_gradient
.globl cu_compute_intensity_gradient
.p2align 3, 0x0
cu_compute_intensity_gradient:
.quad __device_stub__cu_compute_intensity_gradient
.size cu_compute_intensity_gradient, 8
.type cu_magnitude,@object # @cu_magnitude
.globl cu_magnitude
.p2align 3, 0x0
cu_magnitude:
.quad __device_stub__cu_magnitude
.size cu_magnitude, 8
.type cu_suppress_non_max,@object # @cu_suppress_non_max
.globl cu_suppress_non_max
.p2align 3, 0x0
cu_suppress_non_max:
.quad __device_stub__cu_suppress_non_max
.size cu_suppress_non_max, 8
.type cu_hysteresis_high,@object # @cu_hysteresis_high
.globl cu_hysteresis_high
.p2align 3, 0x0
cu_hysteresis_high:
.quad __device_stub__cu_hysteresis_high
.size cu_hysteresis_high, 8
.type cu_hysteresis_low,@object # @cu_hysteresis_low
.globl cu_hysteresis_low
.p2align 3, 0x0
cu_hysteresis_low:
.quad __device_stub__cu_hysteresis_low
.size cu_hysteresis_low, 8
.type hysteresis_kernel,@object # @hysteresis_kernel
.globl hysteresis_kernel
.p2align 3, 0x0
hysteresis_kernel:
.quad __device_stub__hysteresis_kernel
.size hysteresis_kernel, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "cu_apply_gaussian_filter"
.size .L__unnamed_1, 25
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "cu_compute_intensity_gradient"
.size .L__unnamed_2, 30
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "cu_magnitude"
.size .L__unnamed_3, 13
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "cu_suppress_non_max"
.size .L__unnamed_4, 20
.type .L__unnamed_5,@object # @4
.L__unnamed_5:
.asciz "cu_hysteresis_high"
.size .L__unnamed_5, 19
.type .L__unnamed_6,@object # @5
.L__unnamed_6:
.asciz "cu_hysteresis_low"
.size .L__unnamed_6, 18
.type .L__unnamed_7,@object # @6
.L__unnamed_7:
.asciz "hysteresis_kernel"
.size .L__unnamed_7, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__cu_apply_gaussian_filter
.addrsig_sym __device_stub__cu_compute_intensity_gradient
.addrsig_sym __device_stub__cu_magnitude
.addrsig_sym __device_stub__cu_suppress_non_max
.addrsig_sym __device_stub__cu_hysteresis_high
.addrsig_sym __device_stub__cu_hysteresis_low
.addrsig_sym __device_stub__hysteresis_kernel
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym cu_apply_gaussian_filter
.addrsig_sym cu_compute_intensity_gradient
.addrsig_sym cu_magnitude
.addrsig_sym cu_suppress_non_max
.addrsig_sym cu_hysteresis_high
.addrsig_sym cu_hysteresis_low
.addrsig_sym hysteresis_kernel
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "thrust.cuh"
#include <thrust/device_ptr.h>
#include <thrust/device_malloc.h>
#include <thrust/device_free.h>
#include <thrust/sort.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/sequence.h>
#include <thrust/for_each.h>
#include "mod_range.cuh"
namespace my_thrust {
// void my_thrust::stable_sort() {
// thrust::device_ptr<float> d_ptr = thrust::device_malloc<float>(3);
// thrust::device_ptr<float> first = d_ptr;
// thrust::device_ptr<float> last = d_ptr + 3;
// d_ptr[0] = 3.0; d_ptr[1] = 2.0; d_ptr[2] = 1.0;
// thrust::stable_sort(first, last);
// std::cout << d_ptr[0] << ", " << d_ptr[1] << ", " << d_ptr[2] << std::endl;
// thrust::device_free(d_ptr);
// }
static const int NSORTS = 16000;
static const int DSIZE = 1000;
struct Mod {
int d_;
int p_;
Mod(int d) : d_(d) {}
int operator()() {
return p_++ / d_;
}
};
thrust::device_vector<int> gen_rand() {
thrust::host_vector<int> h_data(DSIZE*NSORTS);
thrust::generate(h_data.begin(), h_data.end(), rand);
thrust::device_vector<int> d_data = h_data;
return d_data;
}
bool validate(const thrust::device_vector<int> &d1, const thrust::device_vector<int> &d2){
return thrust::equal(d1.cbegin(), d1.cend(), d2.cbegin());
}
void print(const thrust::device_vector<int>& result) {
std::cout << result[0] << ", " << result[1] << ", " << result[2] << " ... ";
std::cout << result[DSIZE-3] << ", " << result[DSIZE-2] << ", " << result[DSIZE-1] << std::endl;
int c = (NSORTS - 1) * DSIZE;
std::cout << result[c+0] << ", " << result[c+1] << ", " << result[c+2] << " ... ";
std::cout << result[c+DSIZE-3] << ", " << result[c+DSIZE-2] << ", " << result[c+DSIZE-1] << std::endl;
}
template <class Proc>
void benchmark(Proc proc) {
thrust::device_vector<int> d_vec = gen_rand();
auto expect = d_vec;
for (int i = 0; i < NSORTS; i++) {
thrust::sort(expect.begin() + (i*DSIZE), expect.begin() + ((i+1)*DSIZE));
}
// https://ivanlife.wordpress.com/2011/05/09/time-cuda/
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
proc(d_vec);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float time;
cudaEventElapsedTime(&time, start, stop);
print(d_vec);
printf ("Time for the kernel: %f ms\n", time);
if (validate(d_vec, expect)) {
printf("OK!\n");
} else {
printf("failed!\n");
}
}
void stable_sort_batch_vector() {
// ??? count_iterator ???
benchmark([=](thrust::device_vector<int>& d_vec) {
// thrust::host_vector<int> h_segments(DSIZE*NSORTS);
// thrust::generate(h_segments.begin(), h_segments.end(), Mod(DSIZE));
// thrust::device_vector<int> d_segments = h_segments;
typedef thrust::device_vector<int>::iterator Iterator;
mod_range<Iterator> d_segments(d_vec.begin(), d_vec.end(), DSIZE);
thrust::stable_sort_by_key(d_vec.begin(), d_vec.end(), d_segments.begin());
// thrust::stable_sort_by_key(d_segments.begin(), d_segments.end(), d_vec.begin());
});
}
struct SortFunctor
{
thrust::device_ptr<int> data;
int dsize;
__host__ __device__
void operator()(int start_idx)
{
thrust::sort(thrust::device, data+(dsize*start_idx), data+(dsize*(start_idx+1)));
}
};
void stable_sort_batch_nested() {
benchmark([=](thrust::device_vector<int>& d_vec) {
cudaDeviceSetLimit(cudaLimitMallocHeapSize, (16*DSIZE*NSORTS));
thrust::device_vector<int> d_result3 = gen_rand();
SortFunctor f = {d_result3.data(), DSIZE};
thrust::device_vector<int> idxs(NSORTS);
thrust::sequence(idxs.begin(), idxs.end());
thrust::for_each(idxs.begin(), idxs.end(), f);
});
}
} // namespace | #include <hip/hip_runtime.h>
namespace my_thrust {
void stable_sort_batch_vector();
void stable_sort_batch_nested();
} // namespace my_thrust |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
namespace my_thrust {
void stable_sort_batch_vector();
void stable_sort_batch_nested();
} // namespace my_thrust | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
namespace my_thrust {
void stable_sort_batch_vector();
void stable_sort_batch_nested();
} // namespace my_thrust | .text
.file "thrust.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <cuda.h>
#include <time.h>
#include <iostream>
using namespace std;
#define H 1000
#define W 1000
__global__ void multMatCUDA(int *d_a,int *d_b,int *d_c){
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row < H && col < W){
int result = 0;
for(int k = 0; k < W; k++){
result += d_a[row * W + k] * d_b[k * W + col];
}
d_c[row * W + col] = result;
}
}
void multMat(int *h_a, int *h_b, int *h_c){
for(int i = 0; i < H; i++){
for(int j = 0; j < W; j++){
int result = 0;
for(int k = 0; k < W; k++){
result += h_a[i * W + k] * h_b[k * W + j];
}
h_c[i * W + j] = result;
}
}
}
bool compareTo(int *h_c,int *h_result){
for(int i = 0; i < H; i++){
for(int j = 0; j < W; j++){
if(h_c[i * W + j] != h_result[i * W + j]){
return false;
}
}
}
return true;
}
void printMatrix(int *result){
for(int i = 0; i < H; i++){
for(int j = 0; j < W; j++){
cout<<result[i * W + j]<<" ";
}
cout<<endl;
}
}
int main(){
clock_t start, end;
double cpu_time_used, gpu_time_used;
float blockSize = 32;
int *h_a, *h_b, *h_c, *d_a, *d_b, *d_c, *h_result;
//Asignar memoria en el host
h_a = (int*)malloc(sizeof(int)*H*W);
h_b = (int*)malloc(sizeof(int)*H*W);
h_c = (int*)malloc(sizeof(int)*H*W);
h_result = (int*)malloc(sizeof(int)*H*W);
//Inicializar las matrices
for(int i = 0; i < H; i++){
for(int j=0; j < W; j++){
h_a[i*W+j] = i;
h_b[i*W+j] = i+1;
h_c[i*W+j] = 0;
}
}
start = clock();
//Llamar funcion que sume dos vectores y retorne el resultado en h_c
multMat(h_a, h_b, h_c);
end = clock();
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("Tiempo invertido CPU = %lf s\n", cpu_time_used);
//Asignacion de memoria en el device
cudaMalloc(&d_a, sizeof(int)*H*W);
cudaMalloc(&d_b, sizeof(int)*H*W);
cudaMalloc(&d_c, sizeof(int)*H*W);
//Copiar los datos del host al device
cudaMemcpy(d_a, h_a, H*W* sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, H*W* sizeof(int), cudaMemcpyHostToDevice);
dim3 dimBlock(blockSize, blockSize, 1);
dim3 dimGrid(ceil(W/blockSize),ceil(H/blockSize),1);
start = clock();
multMatCUDA<<< dimGrid, dimBlock >>>(d_a, d_b, d_c);
cudaMemcpy(h_result, d_c, H*W*sizeof(int), cudaMemcpyDeviceToHost);
end = clock();
gpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("Tiempo invertido GPU = %lf s\n", gpu_time_used);
if(compareTo(h_c, h_result)){
printf("Matrices Iguales");
}
else{
printf("Matrices Diferentes");
}
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(h_a);
free(h_b);
free(h_c);
free(h_result);
return 0;
} | code for sm_80
Function : _Z11multMatCUDAPiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e280000002100 */
/*0030*/ S2R R7, SR_CTAID.Y ; /* 0x0000000000077919 */
/* 0x000e680000002600 */
/*0040*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e620000002200 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ ISETP.GT.AND P0, PT, R0, 0x3e7, PT ; /* 0x000003e70000780c */
/* 0x000fe20003f04270 */
/*0070*/ IMAD R7, R7, c[0x0][0x4], R2 ; /* 0x0000010007077a24 */
/* 0x002fca00078e0202 */
/*0080*/ ISETP.GT.OR P0, PT, R7, 0x3e7, P0 ; /* 0x000003e70700780c */
/* 0x000fda0000704670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ HFMA2.MMA R6, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff067435 */
/* 0x000fe200000001ff */
/*00b0*/ IMAD R7, R7, 0x3e8, RZ ; /* 0x000003e807077824 */
/* 0x000fe200078e02ff */
/*00c0*/ HFMA2.MMA R9, -RZ, RZ, 0, 0 ; /* 0x00000000ff097435 */
/* 0x000fe200000001ff */
/*00d0*/ MOV R24, RZ ; /* 0x000000ff00187202 */
/* 0x000fe20000000f00 */
/*00e0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*00f0*/ ULDC.64 UR6, c[0x0][0x168] ; /* 0x00005a0000067ab9 */
/* 0x000fc80000000a00 */
/*0100*/ IMAD.WIDE R2, R7, R6, c[0x0][0x160] ; /* 0x0000580007027625 */
/* 0x000fca00078e0206 */
/*0110*/ IADD3 R2, P0, R2, 0x10, RZ ; /* 0x0000001002027810 */
/* 0x000fc80007f1e0ff */
/*0120*/ IADD3.X R3, RZ, R3, RZ, P0, !PT ; /* 0x00000003ff037210 */
/* 0x000fc800007fe4ff */
/*0130*/ MOV R4, UR6 ; /* 0x0000000600047c02 */
/* 0x000fe20008000f00 */
/*0140*/ LDG.E R8, [R2.64+-0x10] ; /* 0xfffff00402087981 */
/* 0x000ea2000c1e1900 */
/*0150*/ MOV R5, UR7 ; /* 0x0000000700057c02 */
/* 0x000fc60008000f00 */
/*0160*/ LDG.E R22, [R2.64+-0xc] ; /* 0xfffff40402167981 */
/* 0x000ee4000c1e1900 */
/*0170*/ IMAD.WIDE R4, R0, 0x4, R4 ; /* 0x0000000400047825 */
/* 0x000fe400078e0204 */
/*0180*/ LDG.E R20, [R2.64+-0x8] ; /* 0xfffff80402147981 */
/* 0x000f28000c1e1900 */
/*0190*/ LDG.E R11, [R4.64] ; /* 0x00000004040b7981 */
/* 0x000ea8000c1e1900 */
/*01a0*/ LDG.E R25, [R4.64+0xfa0] ; /* 0x000fa00404197981 */
/* 0x000ee8000c1e1900 */
/*01b0*/ LDG.E R23, [R4.64+0x1f40] ; /* 0x001f400404177981 */
/* 0x000f28000c1e1900 */
/*01c0*/ LDG.E R21, [R2.64+-0x4] ; /* 0xfffffc0402157981 */
/* 0x000f68000c1e1900 */
/*01d0*/ LDG.E R18, [R4.64+0x2ee0] ; /* 0x002ee00404127981 */
/* 0x000f68000c1e1900 */
/*01e0*/ LDG.E R16, [R2.64] ; /* 0x0000000402107981 */
/* 0x000f68000c1e1900 */
/*01f0*/ LDG.E R19, [R4.64+0x3e80] ; /* 0x003e800404137981 */
/* 0x000f68000c1e1900 */
/*0200*/ LDG.E R14, [R2.64+0x4] ; /* 0x00000404020e7981 */
/* 0x000f68000c1e1900 */
/*0210*/ LDG.E R17, [R4.64+0x4e20] ; /* 0x004e200404117981 */
/* 0x000f68000c1e1900 */
/*0220*/ LDG.E R12, [R2.64+0x8] ; /* 0x00000804020c7981 */
/* 0x000f68000c1e1900 */
/*0230*/ LDG.E R15, [R4.64+0x5dc0] ; /* 0x005dc004040f7981 */
/* 0x000f68000c1e1900 */
/*0240*/ LDG.E R10, [R2.64+0xc] ; /* 0x00000c04020a7981 */
/* 0x000f68000c1e1900 */
/*0250*/ LDG.E R13, [R4.64+0x6d60] ; /* 0x006d6004040d7981 */
/* 0x000f68000c1e1900 */
/*0260*/ LDG.E R27, [R2.64+0x8c] ; /* 0x00008c04021b7981 */
/* 0x000162000c1e1900 */
/*0270*/ IMAD R24, R11, R8, R24 ; /* 0x000000080b187224 */
/* 0x004fc600078e0218 */
/*0280*/ LDG.E R8, [R2.64+0x10] ; /* 0x0000100402087981 */
/* 0x000ea8000c1e1900 */
/*0290*/ LDG.E R11, [R4.64+0x7d00] ; /* 0x007d0004040b7981 */
/* 0x000ea2000c1e1900 */
/*02a0*/ IMAD R24, R25, R22, R24 ; /* 0x0000001619187224 */
/* 0x008fc600078e0218 */
/*02b0*/ LDG.E R22, [R2.64+0x14] ; /* 0x0000140402167981 */
/* 0x000ee2000c1e1900 */
/*02c0*/ IMAD R24, R23, R20, R24 ; /* 0x0000001417187224 */
/* 0x010fc600078e0218 */
/*02d0*/ LDG.E R25, [R4.64+0x8ca0] ; /* 0x008ca00404197981 */
/* 0x000ee8000c1e1900 */
/*02e0*/ LDG.E R20, [R2.64+0x18] ; /* 0x0000180402147981 */
/* 0x000f22000c1e1900 */
/*02f0*/ IMAD R24, R18, R21, R24 ; /* 0x0000001512187224 */
/* 0x020fc600078e0218 */
/*0300*/ LDG.E R23, [R4.64+0x9c40] ; /* 0x009c400404177981 */
/* 0x000f28000c1e1900 */
/*0310*/ LDG.E R18, [R2.64+0x1c] ; /* 0x00001c0402127981 */
/* 0x000f62000c1e1900 */
/*0320*/ IMAD R24, R19, R16, R24 ; /* 0x0000001013187224 */
/* 0x000fc600078e0218 */
/*0330*/ LDG.E R21, [R4.64+0xabe0] ; /* 0x00abe00404157981 */
/* 0x000f68000c1e1900 */
/*0340*/ LDG.E R16, [R2.64+0x20] ; /* 0x0000200402107981 */
/* 0x000f62000c1e1900 */
/*0350*/ IMAD R24, R17, R14, R24 ; /* 0x0000000e11187224 */
/* 0x000fc600078e0218 */
/*0360*/ LDG.E R19, [R4.64+0xbb80] ; /* 0x00bb800404137981 */
/* 0x000f68000c1e1900 */
/*0370*/ LDG.E R14, [R2.64+0x24] ; /* 0x00002404020e7981 */
/* 0x000f62000c1e1900 */
/*0380*/ IMAD R24, R15, R12, R24 ; /* 0x0000000c0f187224 */
/* 0x000fc600078e0218 */
/*0390*/ LDG.E R17, [R4.64+0xcb20] ; /* 0x00cb200404117981 */
/* 0x000f68000c1e1900 */
/*03a0*/ LDG.E R12, [R2.64+0x28] ; /* 0x00002804020c7981 */
/* 0x000f68000c1e1900 */
/*03b0*/ LDG.E R15, [R4.64+0xdac0] ; /* 0x00dac004040f7981 */
/* 0x000f62000c1e1900 */
/*03c0*/ IMAD R24, R13, R10, R24 ; /* 0x0000000a0d187224 */
/* 0x000fc600078e0218 */
/*03d0*/ LDG.E R10, [R2.64+0x2c] ; /* 0x00002c04020a7981 */
/* 0x000f68000c1e1900 */
/*03e0*/ LDG.E R13, [R4.64+0xea60] ; /* 0x00ea6004040d7981 */
/* 0x000f62000c1e1900 */
/*03f0*/ IMAD R24, R11, R8, R24 ; /* 0x000000080b187224 */
/* 0x004fc600078e0218 */
/*0400*/ LDG.E R8, [R2.64+0x30] ; /* 0x0000300402087981 */
/* 0x000ea8000c1e1900 */
/*0410*/ LDG.E R11, [R4.64+0xfa00] ; /* 0x00fa0004040b7981 */
/* 0x000ea2000c1e1900 */
/*0420*/ IMAD R24, R25, R22, R24 ; /* 0x0000001619187224 */
/* 0x008fc600078e0218 */
/*0430*/ LDG.E R22, [R2.64+0x34] ; /* 0x0000340402167981 */
/* 0x000ee8000c1e1900 */
/*0440*/ LDG.E R25, [R4.64+0x109a0] ; /* 0x0109a00404197981 */
/* 0x000ee2000c1e1900 */
/*0450*/ IMAD R24, R23, R20, R24 ; /* 0x0000001417187224 */
/* 0x010fc600078e0218 */
/*0460*/ LDG.E R20, [R2.64+0x38] ; /* 0x0000380402147981 */
/* 0x000f28000c1e1900 */
/*0470*/ LDG.E R23, [R4.64+0x11940] ; /* 0x0119400404177981 */
/* 0x000f22000c1e1900 */
/*0480*/ IMAD R24, R21, R18, R24 ; /* 0x0000001215187224 */
/* 0x020fc600078e0218 */
/*0490*/ LDG.E R18, [R2.64+0x3c] ; /* 0x00003c0402127981 */
/* 0x000f68000c1e1900 */
/*04a0*/ LDG.E R21, [R4.64+0x128e0] ; /* 0x0128e00404157981 */
/* 0x000f62000c1e1900 */
/*04b0*/ IMAD R24, R19, R16, R24 ; /* 0x0000001013187224 */
/* 0x000fc600078e0218 */
/*04c0*/ LDG.E R19, [R2.64+0x40] ; /* 0x0000400402137981 */
/* 0x000f68000c1e1900 */
/*04d0*/ LDG.E R16, [R4.64+0x13880] ; /* 0x0138800404107981 */
/* 0x000f62000c1e1900 */
/*04e0*/ IMAD R24, R17, R14, R24 ; /* 0x0000000e11187224 */
/* 0x000fc600078e0218 */
/*04f0*/ LDG.E R17, [R2.64+0x44] ; /* 0x0000440402117981 */
/* 0x000f68000c1e1900 */
/*0500*/ LDG.E R14, [R4.64+0x14820] ; /* 0x01482004040e7981 */
/* 0x000f62000c1e1900 */
/*0510*/ IMAD R24, R15, R12, R24 ; /* 0x0000000c0f187224 */
/* 0x000fc600078e0218 */
/*0520*/ LDG.E R15, [R2.64+0x48] ; /* 0x00004804020f7981 */
/* 0x000f68000c1e1900 */
/*0530*/ LDG.E R12, [R4.64+0x157c0] ; /* 0x0157c004040c7981 */
/* 0x000f62000c1e1900 */
/*0540*/ IMAD R24, R13, R10, R24 ; /* 0x0000000a0d187224 */
/* 0x000fc600078e0218 */
/*0550*/ LDG.E R13, [R2.64+0x4c] ; /* 0x00004c04020d7981 */
/* 0x000f68000c1e1900 */
/*0560*/ LDG.E R10, [R4.64+0x16760] ; /* 0x01676004040a7981 */
/* 0x000f62000c1e1900 */
/*0570*/ IMAD R24, R11, R8, R24 ; /* 0x000000080b187224 */
/* 0x004fc600078e0218 */
/*0580*/ LDG.E R11, [R2.64+0x50] ; /* 0x00005004020b7981 */
/* 0x000ea8000c1e1900 */
/*0590*/ LDG.E R8, [R4.64+0x17700] ; /* 0x0177000404087981 */
/* 0x000ea2000c1e1900 */
/*05a0*/ IMAD R24, R25, R22, R24 ; /* 0x0000001619187224 */
/* 0x008fc600078e0218 */
/*05b0*/ LDG.E R22, [R2.64+0x54] ; /* 0x0000540402167981 */
/* 0x000ee8000c1e1900 */
/*05c0*/ LDG.E R25, [R4.64+0x186a0] ; /* 0x0186a00404197981 */
/* 0x000ee2000c1e1900 */
/*05d0*/ IMAD R24, R23, R20, R24 ; /* 0x0000001417187224 */
/* 0x010fc600078e0218 */
/*05e0*/ LDG.E R23, [R2.64+0x58] ; /* 0x0000580402177981 */
/* 0x000f28000c1e1900 */
/*05f0*/ LDG.E R20, [R4.64+0x19640] ; /* 0x0196400404147981 */
/* 0x000f22000c1e1900 */
/*0600*/ IMAD R24, R21, R18, R24 ; /* 0x0000001215187224 */
/* 0x020fc600078e0218 */
/*0610*/ LDG.E R21, [R2.64+0x5c] ; /* 0x00005c0402157981 */
/* 0x000f68000c1e1900 */
/*0620*/ LDG.E R18, [R4.64+0x1a5e0] ; /* 0x01a5e00404127981 */
/* 0x000f62000c1e1900 */
/*0630*/ IMAD R24, R16, R19, R24 ; /* 0x0000001310187224 */
/* 0x000fc600078e0218 */
/*0640*/ LDG.E R19, [R2.64+0x60] ; /* 0x0000600402137981 */
/* 0x000f68000c1e1900 */
/*0650*/ LDG.E R16, [R4.64+0x1b580] ; /* 0x01b5800404107981 */
/* 0x000f62000c1e1900 */
/*0660*/ IMAD R24, R14, R17, R24 ; /* 0x000000110e187224 */
/* 0x000fc600078e0218 */
/*0670*/ LDG.E R17, [R2.64+0x64] ; /* 0x0000640402117981 */
/* 0x000f68000c1e1900 */
/*0680*/ LDG.E R14, [R4.64+0x1c520] ; /* 0x01c52004040e7981 */
/* 0x000f62000c1e1900 */
/*0690*/ IMAD R24, R12, R15, R24 ; /* 0x0000000f0c187224 */
/* 0x000fc600078e0218 */
/*06a0*/ LDG.E R15, [R2.64+0x68] ; /* 0x00006804020f7981 */
/* 0x000f68000c1e1900 */
/*06b0*/ LDG.E R12, [R4.64+0x1d4c0] ; /* 0x01d4c004040c7981 */
/* 0x000f62000c1e1900 */
/*06c0*/ IMAD R24, R10, R13, R24 ; /* 0x0000000d0a187224 */
/* 0x000fc600078e0218 */
/*06d0*/ LDG.E R13, [R2.64+0x6c] ; /* 0x00006c04020d7981 */
/* 0x000f68000c1e1900 */
/*06e0*/ LDG.E R10, [R4.64+0x1e460] ; /* 0x01e46004040a7981 */
/* 0x000f62000c1e1900 */
/*06f0*/ IMAD R24, R8, R11, R24 ; /* 0x0000000b08187224 */
/* 0x004fc600078e0218 */
/*0700*/ LDG.E R8, [R2.64+0x70] ; /* 0x0000700402087981 */
/* 0x0000a8000c1e1900 */
/*0710*/ LDG.E R11, [R4.64+0x1f400] ; /* 0x01f40004040b7981 */
/* 0x000ea2000c1e1900 */
/*0720*/ IMAD R24, R25, R22, R24 ; /* 0x0000001619187224 */
/* 0x008fc600078e0218 */
/*0730*/ LDG.E R25, [R2.64+0x74] ; /* 0x0000740402197981 */
/* 0x0000e8000c1e1900 */
/*0740*/ LDG.E R22, [R4.64+0x203a0] ; /* 0x0203a00404167981 */
/* 0x000ee2000c1e1900 */
/*0750*/ IMAD R24, R20, R23, R24 ; /* 0x0000001714187224 */
/* 0x010fc600078e0218 */
/*0760*/ LDG.E R23, [R2.64+0x78] ; /* 0x0000780402177981 */
/* 0x000128000c1e1900 */
/*0770*/ LDG.E R20, [R4.64+0x21340] ; /* 0x0213400404147981 */
/* 0x000f22000c1e1900 */
/*0780*/ IMAD R24, R18, R21, R24 ; /* 0x0000001512187224 */
/* 0x020fc600078e0218 */
/*0790*/ LDG.E R21, [R2.64+0x7c] ; /* 0x00007c0402157981 */
/* 0x000168000c1e1900 */
/*07a0*/ LDG.E R18, [R4.64+0x222e0] ; /* 0x0222e00404127981 */
/* 0x000f62000c1e1900 */
/*07b0*/ IMAD R24, R16, R19, R24 ; /* 0x0000001310187224 */
/* 0x000fc600078e0218 */
/*07c0*/ LDG.E R19, [R2.64+0x80] ; /* 0x0000800402137981 */
/* 0x000168000c1e1900 */
/*07d0*/ LDG.E R16, [R4.64+0x23280] ; /* 0x0232800404107981 */
/* 0x000f62000c1e1900 */
/*07e0*/ IMAD R24, R14, R17, R24 ; /* 0x000000110e187224 */
/* 0x000fc600078e0218 */
/*07f0*/ LDG.E R17, [R2.64+0x84] ; /* 0x0000840402117981 */
/* 0x000168000c1e1900 */
/*0800*/ LDG.E R14, [R4.64+0x24220] ; /* 0x02422004040e7981 */
/* 0x000f62000c1e1900 */
/*0810*/ IMAD R26, R12, R15, R24 ; /* 0x0000000f0c1a7224 */
/* 0x000fc600078e0218 */
/*0820*/ LDG.E R12, [R2.64+0x88] ; /* 0x00008804020c7981 */
/* 0x000168000c1e1900 */
/*0830*/ LDG.E R15, [R4.64+0x251c0] ; /* 0x0251c004040f7981 */
/* 0x000f68000c1e1900 */
/*0840*/ LDG.E R24, [R4.64+0x26160] ; /* 0x0261600404187981 */
/* 0x000f62000c1e1900 */
/*0850*/ IMAD R10, R10, R13, R26 ; /* 0x0000000d0a0a7224 */
/* 0x000fe200078e021a */
/*0860*/ IADD3 R9, R9, 0x28, RZ ; /* 0x0000002809097810 */
/* 0x000fc80007ffe0ff */
/*0870*/ ISETP.NE.AND P0, PT, R9, 0x3e8, PT ; /* 0x000003e80900780c */
/* 0x000fe20003f05270 */
/*0880*/ UIADD3 UR6, UP0, UR6, 0x27100, URZ ; /* 0x0002710006067890 */
/* 0x000fe2000ff1e03f */
/*0890*/ IADD3 R2, P1, R2, 0xa0, RZ ; /* 0x000000a002027810 */
/* 0x001fc60007f3e0ff */
/*08a0*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*08b0*/ IADD3.X R3, RZ, R3, RZ, P1, !PT ; /* 0x00000003ff037210 */
/* 0x000fe20000ffe4ff */
/*08c0*/ IMAD R8, R11, R8, R10 ; /* 0x000000080b087224 */
/* 0x004fc800078e020a */
/*08d0*/ IMAD R8, R22, R25, R8 ; /* 0x0000001916087224 */
/* 0x008fc800078e0208 */
/*08e0*/ IMAD R8, R20, R23, R8 ; /* 0x0000001714087224 */
/* 0x010fc800078e0208 */
/*08f0*/ IMAD R8, R18, R21, R8 ; /* 0x0000001512087224 */
/* 0x020fc800078e0208 */
/*0900*/ IMAD R8, R16, R19, R8 ; /* 0x0000001310087224 */
/* 0x000fc800078e0208 */
/*0910*/ IMAD R8, R14, R17, R8 ; /* 0x000000110e087224 */
/* 0x000fc800078e0208 */
/*0920*/ IMAD R8, R15, R12, R8 ; /* 0x0000000c0f087224 */
/* 0x000fc800078e0208 */
/*0930*/ IMAD R24, R24, R27, R8 ; /* 0x0000001b18187224 */
/* 0x000fe200078e0208 */
/*0940*/ @P0 BRA 0x130 ; /* 0xfffff7e000000947 */
/* 0x000fea000383ffff */
/*0950*/ IADD3 R7, R0, R7, RZ ; /* 0x0000000700077210 */
/* 0x000fca0007ffe0ff */
/*0960*/ IMAD.WIDE R6, R7, R6, c[0x0][0x170] ; /* 0x00005c0007067625 */
/* 0x000fca00078e0206 */
/*0970*/ STG.E [R6.64], R24 ; /* 0x0000001806007986 */
/* 0x000fe2000c101904 */
/*0980*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0990*/ BRA 0x990; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*09a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a00*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a10*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <cuda.h>
#include <time.h>
#include <iostream>
using namespace std;
#define H 1000
#define W 1000
__global__ void multMatCUDA(int *d_a,int *d_b,int *d_c){
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row < H && col < W){
int result = 0;
for(int k = 0; k < W; k++){
result += d_a[row * W + k] * d_b[k * W + col];
}
d_c[row * W + col] = result;
}
}
void multMat(int *h_a, int *h_b, int *h_c){
for(int i = 0; i < H; i++){
for(int j = 0; j < W; j++){
int result = 0;
for(int k = 0; k < W; k++){
result += h_a[i * W + k] * h_b[k * W + j];
}
h_c[i * W + j] = result;
}
}
}
bool compareTo(int *h_c,int *h_result){
for(int i = 0; i < H; i++){
for(int j = 0; j < W; j++){
if(h_c[i * W + j] != h_result[i * W + j]){
return false;
}
}
}
return true;
}
void printMatrix(int *result){
for(int i = 0; i < H; i++){
for(int j = 0; j < W; j++){
cout<<result[i * W + j]<<" ";
}
cout<<endl;
}
}
int main(){
clock_t start, end;
double cpu_time_used, gpu_time_used;
float blockSize = 32;
int *h_a, *h_b, *h_c, *d_a, *d_b, *d_c, *h_result;
//Asignar memoria en el host
h_a = (int*)malloc(sizeof(int)*H*W);
h_b = (int*)malloc(sizeof(int)*H*W);
h_c = (int*)malloc(sizeof(int)*H*W);
h_result = (int*)malloc(sizeof(int)*H*W);
//Inicializar las matrices
for(int i = 0; i < H; i++){
for(int j=0; j < W; j++){
h_a[i*W+j] = i;
h_b[i*W+j] = i+1;
h_c[i*W+j] = 0;
}
}
start = clock();
//Llamar funcion que sume dos vectores y retorne el resultado en h_c
multMat(h_a, h_b, h_c);
end = clock();
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("Tiempo invertido CPU = %lf s\n", cpu_time_used);
//Asignacion de memoria en el device
cudaMalloc(&d_a, sizeof(int)*H*W);
cudaMalloc(&d_b, sizeof(int)*H*W);
cudaMalloc(&d_c, sizeof(int)*H*W);
//Copiar los datos del host al device
cudaMemcpy(d_a, h_a, H*W* sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, H*W* sizeof(int), cudaMemcpyHostToDevice);
dim3 dimBlock(blockSize, blockSize, 1);
dim3 dimGrid(ceil(W/blockSize),ceil(H/blockSize),1);
start = clock();
multMatCUDA<<< dimGrid, dimBlock >>>(d_a, d_b, d_c);
cudaMemcpy(h_result, d_c, H*W*sizeof(int), cudaMemcpyDeviceToHost);
end = clock();
gpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("Tiempo invertido GPU = %lf s\n", gpu_time_used);
if(compareTo(h_c, h_result)){
printf("Matrices Iguales");
}
else{
printf("Matrices Diferentes");
}
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(h_a);
free(h_b);
free(h_c);
free(h_result);
return 0;
} | .file "tmpxft_00189f0b_00000000-6_matMult.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3675:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3675:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z7multMatPiS_S_
.type _Z7multMatPiS_S_, @function
_Z7multMatPiS_S_:
.LFB3669:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movq %rdi, %r12
movq %rsi, %rbp
movq %rdx, %rbx
movl $0, %r11d
.L4:
leaq 4000000(%rbp), %rdi
imulq $4000, %r11, %r9
leaq (%r12,%r9), %r10
addq %rbx, %r9
movl $0, %r8d
.L8:
leaq -4000000(%rdi), %rax
movq %r10, %rcx
movl $0, %esi
.L5:
movl (%rcx), %edx
imull (%rax), %edx
addl %edx, %esi
addq $4, %rcx
addq $4000, %rax
cmpq %rdi, %rax
jne .L5
movl %esi, (%r9,%r8,4)
addq $1, %r8
addq $4, %rdi
cmpq $1000, %r8
jne .L8
addq $1, %r11
cmpq $1000, %r11
jne .L4
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3669:
.size _Z7multMatPiS_S_, .-_Z7multMatPiS_S_
.globl _Z9compareToPiS_
.type _Z9compareToPiS_, @function
_Z9compareToPiS_:
.LFB3670:
.cfi_startproc
endbr64
movl $4000, %edx
.L12:
leaq -4000(%rdx), %rax
.L14:
movl (%rsi,%rax), %ecx
cmpl %ecx, (%rdi,%rax)
jne .L15
addq $4, %rax
cmpq %rdx, %rax
jne .L14
addq $4000, %rdx
cmpq $4004000, %rdx
jne .L12
movl $1, %eax
ret
.L15:
movl $0, %eax
ret
.cfi_endproc
.LFE3670:
.size _Z9compareToPiS_, .-_Z9compareToPiS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string " "
.text
.globl _Z11printMatrixPi
.type _Z11printMatrixPi, @function
_Z11printMatrixPi:
.LFB3671:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
movq %rdi, %r14
leaq 4000(%rdi), %rbp
addq $4004000, %r14
leaq _ZSt4cout(%rip), %r12
leaq .LC0(%rip), %r13
jmp .L19
.L27:
call _ZSt16__throw_bad_castv@PLT
.L28:
movzbl 67(%rbx), %eax
.L23:
movsbl %al, %esi
movq %r12, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
addq $4000, %rbp
cmpq %r14, %rbp
je .L18
.L19:
leaq -4000(%rbp), %rbx
.L20:
movl (%rbx), %esi
movq %r12, %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
movl $1, %edx
movq %r13, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L20
movq (%r12), %rax
movq -24(%rax), %rax
movq 240(%r12,%rax), %rbx
testq %rbx, %rbx
je .L27
cmpb $0, 56(%rbx)
jne .L28
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
jmp .L23
.L18:
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3671:
.size _Z11printMatrixPi, .-_Z11printMatrixPi
.globl _Z35__device_stub__Z11multMatCUDAPiS_S_PiS_S_
.type _Z35__device_stub__Z11multMatCUDAPiS_S_PiS_S_, @function
_Z35__device_stub__Z11multMatCUDAPiS_S_PiS_S_:
.LFB3697:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L33
.L29:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L34
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L33:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z11multMatCUDAPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L29
.L34:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3697:
.size _Z35__device_stub__Z11multMatCUDAPiS_S_PiS_S_, .-_Z35__device_stub__Z11multMatCUDAPiS_S_PiS_S_
.globl _Z11multMatCUDAPiS_S_
.type _Z11multMatCUDAPiS_S_, @function
_Z11multMatCUDAPiS_S_:
.LFB3698:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z11multMatCUDAPiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3698:
.size _Z11multMatCUDAPiS_S_, .-_Z11multMatCUDAPiS_S_
.section .rodata.str1.1
.LC2:
.string "Tiempo invertido CPU = %lf s\n"
.LC3:
.string "Tiempo invertido GPU = %lf s\n"
.LC4:
.string "Matrices Iguales"
.LC5:
.string "Matrices Diferentes"
.text
.globl main
.type main, @function
main:
.LFB3672:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $64, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $4000000, %edi
call malloc@PLT
movq %rax, %r12
movl $4000000, %edi
call malloc@PLT
movq %rax, %rbp
movl $4000000, %edi
call malloc@PLT
movq %rax, %rbx
movl $4000000, %edi
call malloc@PLT
movq %rax, %r13
movl $4000, %ecx
movl $1, %edx
.L38:
leal -1(%rdx), %esi
leaq -4000(%rcx), %rax
.L39:
movl %esi, (%r12,%rax)
movl %edx, 0(%rbp,%rax)
movl $0, (%rbx,%rax)
addq $4, %rax
cmpq %rcx, %rax
jne .L39
addl $1, %edx
addq $4000, %rcx
cmpl $1001, %edx
jne .L38
call clock@PLT
movq %rax, %r14
movq %rbx, %rdx
movq %rbp, %rsi
movq %r12, %rdi
call _Z7multMatPiS_S_
call clock@PLT
subq %r14, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
divsd .LC1(%rip), %xmm0
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
leaq 8(%rsp), %rdi
movl $4000000, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $4000000, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $4000000, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $4000000, %edx
movq %r12, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $4000000, %edx
movq %rbp, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $32, 32(%rsp)
movl $32, 36(%rsp)
movl $1, 40(%rsp)
movl $32, 44(%rsp)
movl $32, 48(%rsp)
movl $1, 52(%rsp)
call clock@PLT
movq %rax, %r14
movl 40(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 32(%rsp), %rdx
movq 44(%rsp), %rdi
movl 52(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L47
.L41:
movl $2, %ecx
movl $4000000, %edx
movq 24(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
call clock@PLT
subq %r14, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
divsd .LC1(%rip), %xmm0
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq %r13, %rsi
movq %rbx, %rdi
call _Z9compareToPiS_
testb %al, %al
je .L42
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L43:
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq %r12, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L48
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L47:
.cfi_restore_state
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z35__device_stub__Z11multMatCUDAPiS_S_PiS_S_
jmp .L41
.L42:
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L43
.L48:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3672:
.size main, .-main
.section .rodata.str1.1
.LC6:
.string "_Z11multMatCUDAPiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3700:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z11multMatCUDAPiS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3700:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC1:
.long 0
.long 1093567616
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <cuda.h>
#include <time.h>
#include <iostream>
using namespace std;
#define H 1000
#define W 1000
__global__ void multMatCUDA(int *d_a,int *d_b,int *d_c){
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row < H && col < W){
int result = 0;
for(int k = 0; k < W; k++){
result += d_a[row * W + k] * d_b[k * W + col];
}
d_c[row * W + col] = result;
}
}
void multMat(int *h_a, int *h_b, int *h_c){
for(int i = 0; i < H; i++){
for(int j = 0; j < W; j++){
int result = 0;
for(int k = 0; k < W; k++){
result += h_a[i * W + k] * h_b[k * W + j];
}
h_c[i * W + j] = result;
}
}
}
bool compareTo(int *h_c,int *h_result){
for(int i = 0; i < H; i++){
for(int j = 0; j < W; j++){
if(h_c[i * W + j] != h_result[i * W + j]){
return false;
}
}
}
return true;
}
void printMatrix(int *result){
for(int i = 0; i < H; i++){
for(int j = 0; j < W; j++){
cout<<result[i * W + j]<<" ";
}
cout<<endl;
}
}
int main(){
clock_t start, end;
double cpu_time_used, gpu_time_used;
float blockSize = 32;
int *h_a, *h_b, *h_c, *d_a, *d_b, *d_c, *h_result;
//Asignar memoria en el host
h_a = (int*)malloc(sizeof(int)*H*W);
h_b = (int*)malloc(sizeof(int)*H*W);
h_c = (int*)malloc(sizeof(int)*H*W);
h_result = (int*)malloc(sizeof(int)*H*W);
//Inicializar las matrices
for(int i = 0; i < H; i++){
for(int j=0; j < W; j++){
h_a[i*W+j] = i;
h_b[i*W+j] = i+1;
h_c[i*W+j] = 0;
}
}
start = clock();
//Llamar funcion que sume dos vectores y retorne el resultado en h_c
multMat(h_a, h_b, h_c);
end = clock();
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("Tiempo invertido CPU = %lf s\n", cpu_time_used);
//Asignacion de memoria en el device
cudaMalloc(&d_a, sizeof(int)*H*W);
cudaMalloc(&d_b, sizeof(int)*H*W);
cudaMalloc(&d_c, sizeof(int)*H*W);
//Copiar los datos del host al device
cudaMemcpy(d_a, h_a, H*W* sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, H*W* sizeof(int), cudaMemcpyHostToDevice);
dim3 dimBlock(blockSize, blockSize, 1);
dim3 dimGrid(ceil(W/blockSize),ceil(H/blockSize),1);
start = clock();
multMatCUDA<<< dimGrid, dimBlock >>>(d_a, d_b, d_c);
cudaMemcpy(h_result, d_c, H*W*sizeof(int), cudaMemcpyDeviceToHost);
end = clock();
gpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("Tiempo invertido GPU = %lf s\n", gpu_time_used);
if(compareTo(h_c, h_result)){
printf("Matrices Iguales");
}
else{
printf("Matrices Diferentes");
}
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(h_a);
free(h_b);
free(h_c);
free(h_result);
return 0;
} | #include <stdio.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include <iostream>
using namespace std;
#define H 1000
#define W 1000
__global__ void multMatCUDA(int *d_a,int *d_b,int *d_c){
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row < H && col < W){
int result = 0;
for(int k = 0; k < W; k++){
result += d_a[row * W + k] * d_b[k * W + col];
}
d_c[row * W + col] = result;
}
}
void multMat(int *h_a, int *h_b, int *h_c){
for(int i = 0; i < H; i++){
for(int j = 0; j < W; j++){
int result = 0;
for(int k = 0; k < W; k++){
result += h_a[i * W + k] * h_b[k * W + j];
}
h_c[i * W + j] = result;
}
}
}
bool compareTo(int *h_c,int *h_result){
for(int i = 0; i < H; i++){
for(int j = 0; j < W; j++){
if(h_c[i * W + j] != h_result[i * W + j]){
return false;
}
}
}
return true;
}
void printMatrix(int *result){
for(int i = 0; i < H; i++){
for(int j = 0; j < W; j++){
cout<<result[i * W + j]<<" ";
}
cout<<endl;
}
}
int main(){
clock_t start, end;
double cpu_time_used, gpu_time_used;
float blockSize = 32;
int *h_a, *h_b, *h_c, *d_a, *d_b, *d_c, *h_result;
//Asignar memoria en el host
h_a = (int*)malloc(sizeof(int)*H*W);
h_b = (int*)malloc(sizeof(int)*H*W);
h_c = (int*)malloc(sizeof(int)*H*W);
h_result = (int*)malloc(sizeof(int)*H*W);
//Inicializar las matrices
for(int i = 0; i < H; i++){
for(int j=0; j < W; j++){
h_a[i*W+j] = i;
h_b[i*W+j] = i+1;
h_c[i*W+j] = 0;
}
}
start = clock();
//Llamar funcion que sume dos vectores y retorne el resultado en h_c
multMat(h_a, h_b, h_c);
end = clock();
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("Tiempo invertido CPU = %lf s\n", cpu_time_used);
//Asignacion de memoria en el device
hipMalloc(&d_a, sizeof(int)*H*W);
hipMalloc(&d_b, sizeof(int)*H*W);
hipMalloc(&d_c, sizeof(int)*H*W);
//Copiar los datos del host al device
hipMemcpy(d_a, h_a, H*W* sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, H*W* sizeof(int), hipMemcpyHostToDevice);
dim3 dimBlock(blockSize, blockSize, 1);
dim3 dimGrid(ceil(W/blockSize),ceil(H/blockSize),1);
start = clock();
multMatCUDA<<< dimGrid, dimBlock >>>(d_a, d_b, d_c);
hipMemcpy(h_result, d_c, H*W*sizeof(int), hipMemcpyDeviceToHost);
end = clock();
gpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("Tiempo invertido GPU = %lf s\n", gpu_time_used);
if(compareTo(h_c, h_result)){
printf("Matrices Iguales");
}
else{
printf("Matrices Diferentes");
}
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
free(h_a);
free(h_b);
free(h_c);
free(h_result);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <stdio.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include <iostream>
using namespace std;
#define H 1000
#define W 1000
__global__ void multMatCUDA(int *d_a,int *d_b,int *d_c){
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row < H && col < W){
int result = 0;
for(int k = 0; k < W; k++){
result += d_a[row * W + k] * d_b[k * W + col];
}
d_c[row * W + col] = result;
}
}
void multMat(int *h_a, int *h_b, int *h_c){
for(int i = 0; i < H; i++){
for(int j = 0; j < W; j++){
int result = 0;
for(int k = 0; k < W; k++){
result += h_a[i * W + k] * h_b[k * W + j];
}
h_c[i * W + j] = result;
}
}
}
bool compareTo(int *h_c,int *h_result){
for(int i = 0; i < H; i++){
for(int j = 0; j < W; j++){
if(h_c[i * W + j] != h_result[i * W + j]){
return false;
}
}
}
return true;
}
void printMatrix(int *result){
for(int i = 0; i < H; i++){
for(int j = 0; j < W; j++){
cout<<result[i * W + j]<<" ";
}
cout<<endl;
}
}
int main(){
clock_t start, end;
double cpu_time_used, gpu_time_used;
float blockSize = 32;
int *h_a, *h_b, *h_c, *d_a, *d_b, *d_c, *h_result;
//Asignar memoria en el host
h_a = (int*)malloc(sizeof(int)*H*W);
h_b = (int*)malloc(sizeof(int)*H*W);
h_c = (int*)malloc(sizeof(int)*H*W);
h_result = (int*)malloc(sizeof(int)*H*W);
//Inicializar las matrices
for(int i = 0; i < H; i++){
for(int j=0; j < W; j++){
h_a[i*W+j] = i;
h_b[i*W+j] = i+1;
h_c[i*W+j] = 0;
}
}
start = clock();
//Llamar funcion que sume dos vectores y retorne el resultado en h_c
multMat(h_a, h_b, h_c);
end = clock();
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("Tiempo invertido CPU = %lf s\n", cpu_time_used);
//Asignacion de memoria en el device
hipMalloc(&d_a, sizeof(int)*H*W);
hipMalloc(&d_b, sizeof(int)*H*W);
hipMalloc(&d_c, sizeof(int)*H*W);
//Copiar los datos del host al device
hipMemcpy(d_a, h_a, H*W* sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, H*W* sizeof(int), hipMemcpyHostToDevice);
dim3 dimBlock(blockSize, blockSize, 1);
dim3 dimGrid(ceil(W/blockSize),ceil(H/blockSize),1);
start = clock();
multMatCUDA<<< dimGrid, dimBlock >>>(d_a, d_b, d_c);
hipMemcpy(h_result, d_c, H*W*sizeof(int), hipMemcpyDeviceToHost);
end = clock();
gpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("Tiempo invertido GPU = %lf s\n", gpu_time_used);
if(compareTo(h_c, h_result)){
printf("Matrices Iguales");
}
else{
printf("Matrices Diferentes");
}
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
free(h_a);
free(h_b);
free(h_c);
free(h_result);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z11multMatCUDAPiS_S_
.globl _Z11multMatCUDAPiS_S_
.p2align 8
.type _Z11multMatCUDAPiS_S_,@function
_Z11multMatCUDAPiS_S_:
s_load_b32 s2, s[0:1], 0x24
v_bfe_u32 v2, v0, 10, 10
v_and_b32_e32 v3, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_lshr_b32 s3, s2, 16
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[0:1], null, s15, s3, v[2:3]
v_mad_u64_u32 v[1:2], null, s14, s2, v[3:4]
s_mov_b32 s2, exec_lo
v_max_i32_e32 v2, v0, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e32 0x3e8, v2
s_cbranch_execz .LBB0_4
s_load_b128 s[4:7], s[0:1], 0x0
v_mul_lo_u32 v2, v0, 0x3e8
v_mov_b32_e32 v4, 0
s_mov_b64 s[2:3], 0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[2:3], 2, v[2:3]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v7, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v8, vcc_lo, s5, v3, vcc_lo
v_mov_b32_e32 v2, v1
.p2align 6
.LBB0_2:
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
v_ashrrev_i32_e32 v3, 31, v2
v_add_co_u32 v5, vcc_lo, v7, s2
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_ci_u32_e32 v6, vcc_lo, s3, v8, vcc_lo
v_lshlrev_b64 v[9:10], 2, v[2:3]
v_add_nc_u32_e32 v2, 0x3e8, v2
s_add_u32 s2, s2, 4
s_addc_u32 s3, s3, 0
s_cmpk_eq_i32 s2, 0xfa0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_u32 v9, vcc_lo, s6, v9
v_add_co_ci_u32_e32 v10, vcc_lo, s7, v10, vcc_lo
global_load_b32 v3, v[5:6], off
global_load_b32 v9, v[9:10], off
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[5:6], null, v9, v3, v[4:5]
s_delay_alu instid0(VALU_DEP_1)
v_mov_b32_e32 v4, v5
s_cbranch_scc0 .LBB0_2
s_load_b64 s[0:1], s[0:1], 0x10
v_mad_u64_u32 v[2:3], null, v0, 0x3e8, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v5, off
.LBB0_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z11multMatCUDAPiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 11
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z11multMatCUDAPiS_S_, .Lfunc_end0-_Z11multMatCUDAPiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z11multMatCUDAPiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z11multMatCUDAPiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 11
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <stdio.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include <iostream>
using namespace std;
#define H 1000
#define W 1000
__global__ void multMatCUDA(int *d_a,int *d_b,int *d_c){
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row < H && col < W){
int result = 0;
for(int k = 0; k < W; k++){
result += d_a[row * W + k] * d_b[k * W + col];
}
d_c[row * W + col] = result;
}
}
void multMat(int *h_a, int *h_b, int *h_c){
for(int i = 0; i < H; i++){
for(int j = 0; j < W; j++){
int result = 0;
for(int k = 0; k < W; k++){
result += h_a[i * W + k] * h_b[k * W + j];
}
h_c[i * W + j] = result;
}
}
}
bool compareTo(int *h_c,int *h_result){
for(int i = 0; i < H; i++){
for(int j = 0; j < W; j++){
if(h_c[i * W + j] != h_result[i * W + j]){
return false;
}
}
}
return true;
}
void printMatrix(int *result){
for(int i = 0; i < H; i++){
for(int j = 0; j < W; j++){
cout<<result[i * W + j]<<" ";
}
cout<<endl;
}
}
int main(){
clock_t start, end;
double cpu_time_used, gpu_time_used;
float blockSize = 32;
int *h_a, *h_b, *h_c, *d_a, *d_b, *d_c, *h_result;
//Asignar memoria en el host
h_a = (int*)malloc(sizeof(int)*H*W);
h_b = (int*)malloc(sizeof(int)*H*W);
h_c = (int*)malloc(sizeof(int)*H*W);
h_result = (int*)malloc(sizeof(int)*H*W);
//Inicializar las matrices
for(int i = 0; i < H; i++){
for(int j=0; j < W; j++){
h_a[i*W+j] = i;
h_b[i*W+j] = i+1;
h_c[i*W+j] = 0;
}
}
start = clock();
//Llamar funcion que sume dos vectores y retorne el resultado en h_c
multMat(h_a, h_b, h_c);
end = clock();
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("Tiempo invertido CPU = %lf s\n", cpu_time_used);
//Asignacion de memoria en el device
hipMalloc(&d_a, sizeof(int)*H*W);
hipMalloc(&d_b, sizeof(int)*H*W);
hipMalloc(&d_c, sizeof(int)*H*W);
//Copiar los datos del host al device
hipMemcpy(d_a, h_a, H*W* sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, H*W* sizeof(int), hipMemcpyHostToDevice);
dim3 dimBlock(blockSize, blockSize, 1);
dim3 dimGrid(ceil(W/blockSize),ceil(H/blockSize),1);
start = clock();
multMatCUDA<<< dimGrid, dimBlock >>>(d_a, d_b, d_c);
hipMemcpy(h_result, d_c, H*W*sizeof(int), hipMemcpyDeviceToHost);
end = clock();
gpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("Tiempo invertido GPU = %lf s\n", gpu_time_used);
if(compareTo(h_c, h_result)){
printf("Matrices Iguales");
}
else{
printf("Matrices Diferentes");
}
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
free(h_a);
free(h_b);
free(h_c);
free(h_result);
return 0;
} | .text
.file "matMult.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z26__device_stub__multMatCUDAPiS_S_ # -- Begin function _Z26__device_stub__multMatCUDAPiS_S_
.p2align 4, 0x90
.type _Z26__device_stub__multMatCUDAPiS_S_,@function
_Z26__device_stub__multMatCUDAPiS_S_: # @_Z26__device_stub__multMatCUDAPiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z11multMatCUDAPiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z26__device_stub__multMatCUDAPiS_S_, .Lfunc_end0-_Z26__device_stub__multMatCUDAPiS_S_
.cfi_endproc
# -- End function
.globl _Z7multMatPiS_S_ # -- Begin function _Z7multMatPiS_S_
.p2align 4, 0x90
.type _Z7multMatPiS_S_,@function
_Z7multMatPiS_S_: # @_Z7multMatPiS_S_
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset %rbx, -24
.cfi_offset %rbp, -16
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # %.preheader19
# =>This Loop Header: Depth=1
# Child Loop BB1_2 Depth 2
# Child Loop BB1_3 Depth 3
imulq $4000, %rax, %rcx # imm = 0xFA0
addq %rdx, %rcx
movq %rsi, %r8
xorl %r9d, %r9d
.p2align 4, 0x90
.LBB1_2: # %.preheader
# Parent Loop BB1_1 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB1_3 Depth 3
xorl %r10d, %r10d
movq %r8, %r11
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_3: # Parent Loop BB1_1 Depth=1
# Parent Loop BB1_2 Depth=2
# => This Inner Loop Header: Depth=3
movl (%r11), %ebp
imull (%rdi,%r10,4), %ebp
addl %ebp, %ebx
incq %r10
addq $4000, %r11 # imm = 0xFA0
cmpq $1000, %r10 # imm = 0x3E8
jne .LBB1_3
# %bb.4: # in Loop: Header=BB1_2 Depth=2
movl %ebx, (%rcx,%r9,4)
incq %r9
addq $4, %r8
cmpq $1000, %r9 # imm = 0x3E8
jne .LBB1_2
# %bb.5: # in Loop: Header=BB1_1 Depth=1
incq %rax
addq $4000, %rdi # imm = 0xFA0
cmpq $1000, %rax # imm = 0x3E8
jne .LBB1_1
# %bb.6:
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z7multMatPiS_S_, .Lfunc_end1-_Z7multMatPiS_S_
.cfi_endproc
# -- End function
.globl _Z9compareToPiS_ # -- Begin function _Z9compareToPiS_
.p2align 4, 0x90
.type _Z9compareToPiS_,@function
_Z9compareToPiS_: # @_Z9compareToPiS_
.cfi_startproc
# %bb.0:
leaq 4(%rsi), %rcx
leaq 4(%rdi), %rdx
xorl %r8d, %r8d
xorl %eax, %eax
jmp .LBB2_1
.p2align 4, 0x90
.LBB2_6: # %.critedge
# in Loop: Header=BB2_1 Depth=1
cmpq $999, %r8 # imm = 0x3E7
leaq 1(%r8), %r9
setae %al
addq $4000, %rcx # imm = 0xFA0
addq $4000, %rdx # imm = 0xFA0
movq %r9, %r8
cmpq $1000, %r9 # imm = 0x3E8
je .LBB2_7
.LBB2_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB2_3 Depth 2
imulq $4000, %r8, %r9 # imm = 0xFA0
movl (%rdi,%r9), %r10d
cmpl (%rsi,%r9), %r10d
jne .LBB2_7
# %bb.2: # %.lr.ph.preheader
# in Loop: Header=BB2_1 Depth=1
movq $-1, %r9
.p2align 4, 0x90
.LBB2_3: # %.lr.ph
# Parent Loop BB2_1 Depth=1
# => This Inner Loop Header: Depth=2
cmpq $998, %r9 # imm = 0x3E6
je .LBB2_6
# %bb.4: # in Loop: Header=BB2_3 Depth=2
movl 4(%rdx,%r9,4), %r11d
leaq 1(%r9), %r10
cmpl 4(%rcx,%r9,4), %r11d
movq %r10, %r9
je .LBB2_3
# %bb.5: # %._crit_edge
# in Loop: Header=BB2_1 Depth=1
cmpq $999, %r10 # imm = 0x3E7
jae .LBB2_6
.LBB2_7: # %.critedge29
andb $1, %al
# kill: def $al killed $al killed $rax
retq
.Lfunc_end2:
.size _Z9compareToPiS_, .Lfunc_end2-_Z9compareToPiS_
.cfi_endproc
# -- End function
.globl _Z11printMatrixPi # -- Begin function _Z11printMatrixPi
.p2align 4, 0x90
.type _Z11printMatrixPi,@function
_Z11printMatrixPi: # @_Z11printMatrixPi
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
xorl %r15d, %r15d
jmp .LBB3_1
.p2align 4, 0x90
.LBB3_6: # in Loop: Header=BB3_1 Depth=1
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB3_7: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
# in Loop: Header=BB3_1 Depth=1
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
incq %r15
addq $4000, %rbx # imm = 0xFA0
cmpq $1000, %r15 # imm = 0x3E8
je .LBB3_8
.LBB3_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB3_2 Depth 2
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_2: # Parent Loop BB3_1 Depth=1
# => This Inner Loop Header: Depth=2
movl (%rbx,%r14,4), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movl $.L.str, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
incq %r14
cmpq $1000, %r14 # imm = 0x3E8
jne .LBB3_2
# %bb.3: # in Loop: Header=BB3_1 Depth=1
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r14
testq %r14, %r14
je .LBB3_9
# %bb.4: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
# in Loop: Header=BB3_1 Depth=1
cmpb $0, 56(%r14)
je .LBB3_6
# %bb.5: # in Loop: Header=BB3_1 Depth=1
movzbl 67(%r14), %eax
jmp .LBB3_7
.LBB3_8:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB3_9:
.cfi_def_cfa_offset 32
callq _ZSt16__throw_bad_castv
.Lfunc_end3:
.size _Z11printMatrixPi, .Lfunc_end3-_Z11printMatrixPi
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI4_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $120, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $4000000, %edi # imm = 0x3D0900
callq malloc
movq %rax, %rbx
movl $4000000, %edi # imm = 0x3D0900
callq malloc
movq %rax, %r14
movl $4000000, %edi # imm = 0x3D0900
callq malloc
movq %rax, %r15
movl $4000000, %edi # imm = 0x3D0900
callq malloc
movq %rax, %r12
xorl %r13d, %r13d
movl $4000000, %edx # imm = 0x3D0900
movq %r15, %rdi
xorl %esi, %esi
callq memset@PLT
movq %rbx, %rax
movq %r14, %rcx
.p2align 4, 0x90
.LBB4_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB4_2 Depth 2
movq %r13, %rdx
incq %r13
xorl %esi, %esi
.p2align 4, 0x90
.LBB4_2: # Parent Loop BB4_1 Depth=1
# => This Inner Loop Header: Depth=2
movl %edx, (%rax,%rsi,4)
movl %r13d, (%rcx,%rsi,4)
incq %rsi
cmpq $1000, %rsi # imm = 0x3E8
jne .LBB4_2
# %bb.3: # in Loop: Header=BB4_1 Depth=1
addq $4000, %rcx # imm = 0xFA0
addq $4000, %rax # imm = 0xFA0
cmpq $1000, %r13 # imm = 0x3E8
jne .LBB4_1
# %bb.4:
xorl %ebp, %ebp
callq clock
movq %rax, %r13
movq %rbx, %rax
.p2align 4, 0x90
.LBB4_5: # %.preheader19.i
# =>This Loop Header: Depth=1
# Child Loop BB4_6 Depth 2
# Child Loop BB4_7 Depth 3
imulq $4000, %rbp, %rcx # imm = 0xFA0
addq %r15, %rcx
movq %r14, %rdx
xorl %esi, %esi
.p2align 4, 0x90
.LBB4_6: # %.preheader.i
# Parent Loop BB4_5 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB4_7 Depth 3
xorl %edi, %edi
movq %rdx, %r8
xorl %r9d, %r9d
.p2align 4, 0x90
.LBB4_7: # Parent Loop BB4_5 Depth=1
# Parent Loop BB4_6 Depth=2
# => This Inner Loop Header: Depth=3
movl (%r8), %r10d
imull (%rax,%rdi,4), %r10d
addl %r10d, %r9d
incq %rdi
addq $4000, %r8 # imm = 0xFA0
cmpq $1000, %rdi # imm = 0x3E8
jne .LBB4_7
# %bb.8: # in Loop: Header=BB4_6 Depth=2
movl %r9d, (%rcx,%rsi,4)
incq %rsi
addq $4, %rdx
cmpq $1000, %rsi # imm = 0x3E8
jne .LBB4_6
# %bb.9: # in Loop: Header=BB4_5 Depth=1
incq %rbp
addq $4000, %rax # imm = 0xFA0
cmpq $1000, %rbp # imm = 0x3E8
jne .LBB4_5
# %bb.10: # %_Z7multMatPiS_S_.exit
callq clock
subq %r13, %rax
cvtsi2sd %rax, %xmm0
divsd .LCPI4_0(%rip), %xmm0
movl $.L.str.1, %edi
movb $1, %al
callq printf
leaq 16(%rsp), %rdi
movl $4000000, %esi # imm = 0x3D0900
callq hipMalloc
leaq 8(%rsp), %rdi
movl $4000000, %esi # imm = 0x3D0900
callq hipMalloc
movq %rsp, %rdi
movl $4000000, %esi # imm = 0x3D0900
callq hipMalloc
movq 16(%rsp), %rdi
movl $4000000, %edx # imm = 0x3D0900
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movl $4000000, %edx # imm = 0x3D0900
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
callq clock
movq %rax, %r13
movabsq $137438953504, %rdi # imm = 0x2000000020
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_12
# %bb.11:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z11multMatCUDAPiS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_12:
movq (%rsp), %rsi
movl $4000000, %edx # imm = 0x3D0900
movq %r12, %rdi
movl $2, %ecx
callq hipMemcpy
callq clock
subq %r13, %rax
xorps %xmm0, %xmm0
cvtsi2sd %rax, %xmm0
divsd .LCPI4_0(%rip), %xmm0
movl $.L.str.2, %edi
movb $1, %al
callq printf
movl (%r15), %eax
cmpl (%r12), %eax
jne .LBB4_21
# %bb.13: # %.lr.ph.preheader.preheader
movq %r12, %rax
addq $4, %rax
movq %r15, %rdx
addq $4, %rdx
xorl %ecx, %ecx
xorl %edi, %edi
.p2align 4, 0x90
.LBB4_15: # %.lr.ph.preheader
# =>This Loop Header: Depth=1
# Child Loop BB4_16 Depth 2
movq %rdi, %rsi
movq $-1, %rdi
.p2align 4, 0x90
.LBB4_16: # %.lr.ph
# Parent Loop BB4_15 Depth=1
# => This Inner Loop Header: Depth=2
cmpq $998, %rdi # imm = 0x3E6
je .LBB4_19
# %bb.17: # in Loop: Header=BB4_16 Depth=2
movl 4(%rdx,%rdi,4), %r9d
leaq 1(%rdi), %r8
cmpl 4(%rax,%rdi,4), %r9d
movq %r8, %rdi
je .LBB4_16
# %bb.18: # %._crit_edge
# in Loop: Header=BB4_15 Depth=1
cmpq $999, %r8 # imm = 0x3E7
jb .LBB4_20
.LBB4_19: # %.critedge.i
# in Loop: Header=BB4_15 Depth=1
leaq 1(%rsi), %rdi
cmpq $999, %rsi # imm = 0x3E7
setae %cl
cmpq $1000, %rdi # imm = 0x3E8
je .LBB4_20
# %bb.14: # %.preheader.i44
# in Loop: Header=BB4_15 Depth=1
imulq $4000, %rdi, %rsi # imm = 0xFA0
movl (%r15,%rsi), %r8d
addq $4000, %rax # imm = 0xFA0
addq $4000, %rdx # imm = 0xFA0
cmpl (%r12,%rsi), %r8d
je .LBB4_15
.LBB4_20: # %_Z9compareToPiS_.exit
movl $.L.str.3, %edi
testb $1, %cl
jne .LBB4_22
.LBB4_21: # %.critedge
movl $.L.str.4, %edi
.LBB4_22:
xorl %eax, %eax
callq printf
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
movq %r12, %rdi
callq free
xorl %eax, %eax
addq $120, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size main, .Lfunc_end4-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB5_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB5_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11multMatCUDAPiS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end5:
.size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB6_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB6_2:
retq
.Lfunc_end6:
.size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z11multMatCUDAPiS_S_,@object # @_Z11multMatCUDAPiS_S_
.section .rodata,"a",@progbits
.globl _Z11multMatCUDAPiS_S_
.p2align 3, 0x0
_Z11multMatCUDAPiS_S_:
.quad _Z26__device_stub__multMatCUDAPiS_S_
.size _Z11multMatCUDAPiS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz " "
.size .L.str, 2
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Tiempo invertido CPU = %lf s\n"
.size .L.str.1, 30
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Tiempo invertido GPU = %lf s\n"
.size .L.str.2, 30
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Matrices Iguales"
.size .L.str.3, 17
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Matrices Diferentes"
.size .L.str.4, 20
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z11multMatCUDAPiS_S_"
.size .L__unnamed_1, 22
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z26__device_stub__multMatCUDAPiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z11multMatCUDAPiS_S_
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z11multMatCUDAPiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e280000002100 */
/*0030*/ S2R R7, SR_CTAID.Y ; /* 0x0000000000077919 */
/* 0x000e680000002600 */
/*0040*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e620000002200 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ ISETP.GT.AND P0, PT, R0, 0x3e7, PT ; /* 0x000003e70000780c */
/* 0x000fe20003f04270 */
/*0070*/ IMAD R7, R7, c[0x0][0x4], R2 ; /* 0x0000010007077a24 */
/* 0x002fca00078e0202 */
/*0080*/ ISETP.GT.OR P0, PT, R7, 0x3e7, P0 ; /* 0x000003e70700780c */
/* 0x000fda0000704670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ HFMA2.MMA R6, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff067435 */
/* 0x000fe200000001ff */
/*00b0*/ IMAD R7, R7, 0x3e8, RZ ; /* 0x000003e807077824 */
/* 0x000fe200078e02ff */
/*00c0*/ HFMA2.MMA R9, -RZ, RZ, 0, 0 ; /* 0x00000000ff097435 */
/* 0x000fe200000001ff */
/*00d0*/ MOV R24, RZ ; /* 0x000000ff00187202 */
/* 0x000fe20000000f00 */
/*00e0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*00f0*/ ULDC.64 UR6, c[0x0][0x168] ; /* 0x00005a0000067ab9 */
/* 0x000fc80000000a00 */
/*0100*/ IMAD.WIDE R2, R7, R6, c[0x0][0x160] ; /* 0x0000580007027625 */
/* 0x000fca00078e0206 */
/*0110*/ IADD3 R2, P0, R2, 0x10, RZ ; /* 0x0000001002027810 */
/* 0x000fc80007f1e0ff */
/*0120*/ IADD3.X R3, RZ, R3, RZ, P0, !PT ; /* 0x00000003ff037210 */
/* 0x000fc800007fe4ff */
/*0130*/ MOV R4, UR6 ; /* 0x0000000600047c02 */
/* 0x000fe20008000f00 */
/*0140*/ LDG.E R8, [R2.64+-0x10] ; /* 0xfffff00402087981 */
/* 0x000ea2000c1e1900 */
/*0150*/ MOV R5, UR7 ; /* 0x0000000700057c02 */
/* 0x000fc60008000f00 */
/*0160*/ LDG.E R22, [R2.64+-0xc] ; /* 0xfffff40402167981 */
/* 0x000ee4000c1e1900 */
/*0170*/ IMAD.WIDE R4, R0, 0x4, R4 ; /* 0x0000000400047825 */
/* 0x000fe400078e0204 */
/*0180*/ LDG.E R20, [R2.64+-0x8] ; /* 0xfffff80402147981 */
/* 0x000f28000c1e1900 */
/*0190*/ LDG.E R11, [R4.64] ; /* 0x00000004040b7981 */
/* 0x000ea8000c1e1900 */
/*01a0*/ LDG.E R25, [R4.64+0xfa0] ; /* 0x000fa00404197981 */
/* 0x000ee8000c1e1900 */
/*01b0*/ LDG.E R23, [R4.64+0x1f40] ; /* 0x001f400404177981 */
/* 0x000f28000c1e1900 */
/*01c0*/ LDG.E R21, [R2.64+-0x4] ; /* 0xfffffc0402157981 */
/* 0x000f68000c1e1900 */
/*01d0*/ LDG.E R18, [R4.64+0x2ee0] ; /* 0x002ee00404127981 */
/* 0x000f68000c1e1900 */
/*01e0*/ LDG.E R16, [R2.64] ; /* 0x0000000402107981 */
/* 0x000f68000c1e1900 */
/*01f0*/ LDG.E R19, [R4.64+0x3e80] ; /* 0x003e800404137981 */
/* 0x000f68000c1e1900 */
/*0200*/ LDG.E R14, [R2.64+0x4] ; /* 0x00000404020e7981 */
/* 0x000f68000c1e1900 */
/*0210*/ LDG.E R17, [R4.64+0x4e20] ; /* 0x004e200404117981 */
/* 0x000f68000c1e1900 */
/*0220*/ LDG.E R12, [R2.64+0x8] ; /* 0x00000804020c7981 */
/* 0x000f68000c1e1900 */
/*0230*/ LDG.E R15, [R4.64+0x5dc0] ; /* 0x005dc004040f7981 */
/* 0x000f68000c1e1900 */
/*0240*/ LDG.E R10, [R2.64+0xc] ; /* 0x00000c04020a7981 */
/* 0x000f68000c1e1900 */
/*0250*/ LDG.E R13, [R4.64+0x6d60] ; /* 0x006d6004040d7981 */
/* 0x000f68000c1e1900 */
/*0260*/ LDG.E R27, [R2.64+0x8c] ; /* 0x00008c04021b7981 */
/* 0x000162000c1e1900 */
/*0270*/ IMAD R24, R11, R8, R24 ; /* 0x000000080b187224 */
/* 0x004fc600078e0218 */
/*0280*/ LDG.E R8, [R2.64+0x10] ; /* 0x0000100402087981 */
/* 0x000ea8000c1e1900 */
/*0290*/ LDG.E R11, [R4.64+0x7d00] ; /* 0x007d0004040b7981 */
/* 0x000ea2000c1e1900 */
/*02a0*/ IMAD R24, R25, R22, R24 ; /* 0x0000001619187224 */
/* 0x008fc600078e0218 */
/*02b0*/ LDG.E R22, [R2.64+0x14] ; /* 0x0000140402167981 */
/* 0x000ee2000c1e1900 */
/*02c0*/ IMAD R24, R23, R20, R24 ; /* 0x0000001417187224 */
/* 0x010fc600078e0218 */
/*02d0*/ LDG.E R25, [R4.64+0x8ca0] ; /* 0x008ca00404197981 */
/* 0x000ee8000c1e1900 */
/*02e0*/ LDG.E R20, [R2.64+0x18] ; /* 0x0000180402147981 */
/* 0x000f22000c1e1900 */
/*02f0*/ IMAD R24, R18, R21, R24 ; /* 0x0000001512187224 */
/* 0x020fc600078e0218 */
/*0300*/ LDG.E R23, [R4.64+0x9c40] ; /* 0x009c400404177981 */
/* 0x000f28000c1e1900 */
/*0310*/ LDG.E R18, [R2.64+0x1c] ; /* 0x00001c0402127981 */
/* 0x000f62000c1e1900 */
/*0320*/ IMAD R24, R19, R16, R24 ; /* 0x0000001013187224 */
/* 0x000fc600078e0218 */
/*0330*/ LDG.E R21, [R4.64+0xabe0] ; /* 0x00abe00404157981 */
/* 0x000f68000c1e1900 */
/*0340*/ LDG.E R16, [R2.64+0x20] ; /* 0x0000200402107981 */
/* 0x000f62000c1e1900 */
/*0350*/ IMAD R24, R17, R14, R24 ; /* 0x0000000e11187224 */
/* 0x000fc600078e0218 */
/*0360*/ LDG.E R19, [R4.64+0xbb80] ; /* 0x00bb800404137981 */
/* 0x000f68000c1e1900 */
/*0370*/ LDG.E R14, [R2.64+0x24] ; /* 0x00002404020e7981 */
/* 0x000f62000c1e1900 */
/*0380*/ IMAD R24, R15, R12, R24 ; /* 0x0000000c0f187224 */
/* 0x000fc600078e0218 */
/*0390*/ LDG.E R17, [R4.64+0xcb20] ; /* 0x00cb200404117981 */
/* 0x000f68000c1e1900 */
/*03a0*/ LDG.E R12, [R2.64+0x28] ; /* 0x00002804020c7981 */
/* 0x000f68000c1e1900 */
/*03b0*/ LDG.E R15, [R4.64+0xdac0] ; /* 0x00dac004040f7981 */
/* 0x000f62000c1e1900 */
/*03c0*/ IMAD R24, R13, R10, R24 ; /* 0x0000000a0d187224 */
/* 0x000fc600078e0218 */
/*03d0*/ LDG.E R10, [R2.64+0x2c] ; /* 0x00002c04020a7981 */
/* 0x000f68000c1e1900 */
/*03e0*/ LDG.E R13, [R4.64+0xea60] ; /* 0x00ea6004040d7981 */
/* 0x000f62000c1e1900 */
/*03f0*/ IMAD R24, R11, R8, R24 ; /* 0x000000080b187224 */
/* 0x004fc600078e0218 */
/*0400*/ LDG.E R8, [R2.64+0x30] ; /* 0x0000300402087981 */
/* 0x000ea8000c1e1900 */
/*0410*/ LDG.E R11, [R4.64+0xfa00] ; /* 0x00fa0004040b7981 */
/* 0x000ea2000c1e1900 */
/*0420*/ IMAD R24, R25, R22, R24 ; /* 0x0000001619187224 */
/* 0x008fc600078e0218 */
/*0430*/ LDG.E R22, [R2.64+0x34] ; /* 0x0000340402167981 */
/* 0x000ee8000c1e1900 */
/*0440*/ LDG.E R25, [R4.64+0x109a0] ; /* 0x0109a00404197981 */
/* 0x000ee2000c1e1900 */
/*0450*/ IMAD R24, R23, R20, R24 ; /* 0x0000001417187224 */
/* 0x010fc600078e0218 */
/*0460*/ LDG.E R20, [R2.64+0x38] ; /* 0x0000380402147981 */
/* 0x000f28000c1e1900 */
/*0470*/ LDG.E R23, [R4.64+0x11940] ; /* 0x0119400404177981 */
/* 0x000f22000c1e1900 */
/*0480*/ IMAD R24, R21, R18, R24 ; /* 0x0000001215187224 */
/* 0x020fc600078e0218 */
/*0490*/ LDG.E R18, [R2.64+0x3c] ; /* 0x00003c0402127981 */
/* 0x000f68000c1e1900 */
/*04a0*/ LDG.E R21, [R4.64+0x128e0] ; /* 0x0128e00404157981 */
/* 0x000f62000c1e1900 */
/*04b0*/ IMAD R24, R19, R16, R24 ; /* 0x0000001013187224 */
/* 0x000fc600078e0218 */
/*04c0*/ LDG.E R19, [R2.64+0x40] ; /* 0x0000400402137981 */
/* 0x000f68000c1e1900 */
/*04d0*/ LDG.E R16, [R4.64+0x13880] ; /* 0x0138800404107981 */
/* 0x000f62000c1e1900 */
/*04e0*/ IMAD R24, R17, R14, R24 ; /* 0x0000000e11187224 */
/* 0x000fc600078e0218 */
/*04f0*/ LDG.E R17, [R2.64+0x44] ; /* 0x0000440402117981 */
/* 0x000f68000c1e1900 */
/*0500*/ LDG.E R14, [R4.64+0x14820] ; /* 0x01482004040e7981 */
/* 0x000f62000c1e1900 */
/*0510*/ IMAD R24, R15, R12, R24 ; /* 0x0000000c0f187224 */
/* 0x000fc600078e0218 */
/*0520*/ LDG.E R15, [R2.64+0x48] ; /* 0x00004804020f7981 */
/* 0x000f68000c1e1900 */
/*0530*/ LDG.E R12, [R4.64+0x157c0] ; /* 0x0157c004040c7981 */
/* 0x000f62000c1e1900 */
/*0540*/ IMAD R24, R13, R10, R24 ; /* 0x0000000a0d187224 */
/* 0x000fc600078e0218 */
/*0550*/ LDG.E R13, [R2.64+0x4c] ; /* 0x00004c04020d7981 */
/* 0x000f68000c1e1900 */
/*0560*/ LDG.E R10, [R4.64+0x16760] ; /* 0x01676004040a7981 */
/* 0x000f62000c1e1900 */
/*0570*/ IMAD R24, R11, R8, R24 ; /* 0x000000080b187224 */
/* 0x004fc600078e0218 */
/*0580*/ LDG.E R11, [R2.64+0x50] ; /* 0x00005004020b7981 */
/* 0x000ea8000c1e1900 */
/*0590*/ LDG.E R8, [R4.64+0x17700] ; /* 0x0177000404087981 */
/* 0x000ea2000c1e1900 */
/*05a0*/ IMAD R24, R25, R22, R24 ; /* 0x0000001619187224 */
/* 0x008fc600078e0218 */
/*05b0*/ LDG.E R22, [R2.64+0x54] ; /* 0x0000540402167981 */
/* 0x000ee8000c1e1900 */
/*05c0*/ LDG.E R25, [R4.64+0x186a0] ; /* 0x0186a00404197981 */
/* 0x000ee2000c1e1900 */
/*05d0*/ IMAD R24, R23, R20, R24 ; /* 0x0000001417187224 */
/* 0x010fc600078e0218 */
/*05e0*/ LDG.E R23, [R2.64+0x58] ; /* 0x0000580402177981 */
/* 0x000f28000c1e1900 */
/*05f0*/ LDG.E R20, [R4.64+0x19640] ; /* 0x0196400404147981 */
/* 0x000f22000c1e1900 */
/*0600*/ IMAD R24, R21, R18, R24 ; /* 0x0000001215187224 */
/* 0x020fc600078e0218 */
/*0610*/ LDG.E R21, [R2.64+0x5c] ; /* 0x00005c0402157981 */
/* 0x000f68000c1e1900 */
/*0620*/ LDG.E R18, [R4.64+0x1a5e0] ; /* 0x01a5e00404127981 */
/* 0x000f62000c1e1900 */
/*0630*/ IMAD R24, R16, R19, R24 ; /* 0x0000001310187224 */
/* 0x000fc600078e0218 */
/*0640*/ LDG.E R19, [R2.64+0x60] ; /* 0x0000600402137981 */
/* 0x000f68000c1e1900 */
/*0650*/ LDG.E R16, [R4.64+0x1b580] ; /* 0x01b5800404107981 */
/* 0x000f62000c1e1900 */
/*0660*/ IMAD R24, R14, R17, R24 ; /* 0x000000110e187224 */
/* 0x000fc600078e0218 */
/*0670*/ LDG.E R17, [R2.64+0x64] ; /* 0x0000640402117981 */
/* 0x000f68000c1e1900 */
/*0680*/ LDG.E R14, [R4.64+0x1c520] ; /* 0x01c52004040e7981 */
/* 0x000f62000c1e1900 */
/*0690*/ IMAD R24, R12, R15, R24 ; /* 0x0000000f0c187224 */
/* 0x000fc600078e0218 */
/*06a0*/ LDG.E R15, [R2.64+0x68] ; /* 0x00006804020f7981 */
/* 0x000f68000c1e1900 */
/*06b0*/ LDG.E R12, [R4.64+0x1d4c0] ; /* 0x01d4c004040c7981 */
/* 0x000f62000c1e1900 */
/*06c0*/ IMAD R24, R10, R13, R24 ; /* 0x0000000d0a187224 */
/* 0x000fc600078e0218 */
/*06d0*/ LDG.E R13, [R2.64+0x6c] ; /* 0x00006c04020d7981 */
/* 0x000f68000c1e1900 */
/*06e0*/ LDG.E R10, [R4.64+0x1e460] ; /* 0x01e46004040a7981 */
/* 0x000f62000c1e1900 */
/*06f0*/ IMAD R24, R8, R11, R24 ; /* 0x0000000b08187224 */
/* 0x004fc600078e0218 */
/*0700*/ LDG.E R8, [R2.64+0x70] ; /* 0x0000700402087981 */
/* 0x0000a8000c1e1900 */
/*0710*/ LDG.E R11, [R4.64+0x1f400] ; /* 0x01f40004040b7981 */
/* 0x000ea2000c1e1900 */
/*0720*/ IMAD R24, R25, R22, R24 ; /* 0x0000001619187224 */
/* 0x008fc600078e0218 */
/*0730*/ LDG.E R25, [R2.64+0x74] ; /* 0x0000740402197981 */
/* 0x0000e8000c1e1900 */
/*0740*/ LDG.E R22, [R4.64+0x203a0] ; /* 0x0203a00404167981 */
/* 0x000ee2000c1e1900 */
/*0750*/ IMAD R24, R20, R23, R24 ; /* 0x0000001714187224 */
/* 0x010fc600078e0218 */
/*0760*/ LDG.E R23, [R2.64+0x78] ; /* 0x0000780402177981 */
/* 0x000128000c1e1900 */
/*0770*/ LDG.E R20, [R4.64+0x21340] ; /* 0x0213400404147981 */
/* 0x000f22000c1e1900 */
/*0780*/ IMAD R24, R18, R21, R24 ; /* 0x0000001512187224 */
/* 0x020fc600078e0218 */
/*0790*/ LDG.E R21, [R2.64+0x7c] ; /* 0x00007c0402157981 */
/* 0x000168000c1e1900 */
/*07a0*/ LDG.E R18, [R4.64+0x222e0] ; /* 0x0222e00404127981 */
/* 0x000f62000c1e1900 */
/*07b0*/ IMAD R24, R16, R19, R24 ; /* 0x0000001310187224 */
/* 0x000fc600078e0218 */
/*07c0*/ LDG.E R19, [R2.64+0x80] ; /* 0x0000800402137981 */
/* 0x000168000c1e1900 */
/*07d0*/ LDG.E R16, [R4.64+0x23280] ; /* 0x0232800404107981 */
/* 0x000f62000c1e1900 */
/*07e0*/ IMAD R24, R14, R17, R24 ; /* 0x000000110e187224 */
/* 0x000fc600078e0218 */
/*07f0*/ LDG.E R17, [R2.64+0x84] ; /* 0x0000840402117981 */
/* 0x000168000c1e1900 */
/*0800*/ LDG.E R14, [R4.64+0x24220] ; /* 0x02422004040e7981 */
/* 0x000f62000c1e1900 */
/*0810*/ IMAD R26, R12, R15, R24 ; /* 0x0000000f0c1a7224 */
/* 0x000fc600078e0218 */
/*0820*/ LDG.E R12, [R2.64+0x88] ; /* 0x00008804020c7981 */
/* 0x000168000c1e1900 */
/*0830*/ LDG.E R15, [R4.64+0x251c0] ; /* 0x0251c004040f7981 */
/* 0x000f68000c1e1900 */
/*0840*/ LDG.E R24, [R4.64+0x26160] ; /* 0x0261600404187981 */
/* 0x000f62000c1e1900 */
/*0850*/ IMAD R10, R10, R13, R26 ; /* 0x0000000d0a0a7224 */
/* 0x000fe200078e021a */
/*0860*/ IADD3 R9, R9, 0x28, RZ ; /* 0x0000002809097810 */
/* 0x000fc80007ffe0ff */
/*0870*/ ISETP.NE.AND P0, PT, R9, 0x3e8, PT ; /* 0x000003e80900780c */
/* 0x000fe20003f05270 */
/*0880*/ UIADD3 UR6, UP0, UR6, 0x27100, URZ ; /* 0x0002710006067890 */
/* 0x000fe2000ff1e03f */
/*0890*/ IADD3 R2, P1, R2, 0xa0, RZ ; /* 0x000000a002027810 */
/* 0x001fc60007f3e0ff */
/*08a0*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*08b0*/ IADD3.X R3, RZ, R3, RZ, P1, !PT ; /* 0x00000003ff037210 */
/* 0x000fe20000ffe4ff */
/*08c0*/ IMAD R8, R11, R8, R10 ; /* 0x000000080b087224 */
/* 0x004fc800078e020a */
/*08d0*/ IMAD R8, R22, R25, R8 ; /* 0x0000001916087224 */
/* 0x008fc800078e0208 */
/*08e0*/ IMAD R8, R20, R23, R8 ; /* 0x0000001714087224 */
/* 0x010fc800078e0208 */
/*08f0*/ IMAD R8, R18, R21, R8 ; /* 0x0000001512087224 */
/* 0x020fc800078e0208 */
/*0900*/ IMAD R8, R16, R19, R8 ; /* 0x0000001310087224 */
/* 0x000fc800078e0208 */
/*0910*/ IMAD R8, R14, R17, R8 ; /* 0x000000110e087224 */
/* 0x000fc800078e0208 */
/*0920*/ IMAD R8, R15, R12, R8 ; /* 0x0000000c0f087224 */
/* 0x000fc800078e0208 */
/*0930*/ IMAD R24, R24, R27, R8 ; /* 0x0000001b18187224 */
/* 0x000fe200078e0208 */
/*0940*/ @P0 BRA 0x130 ; /* 0xfffff7e000000947 */
/* 0x000fea000383ffff */
/*0950*/ IADD3 R7, R0, R7, RZ ; /* 0x0000000700077210 */
/* 0x000fca0007ffe0ff */
/*0960*/ IMAD.WIDE R6, R7, R6, c[0x0][0x170] ; /* 0x00005c0007067625 */
/* 0x000fca00078e0206 */
/*0970*/ STG.E [R6.64], R24 ; /* 0x0000001806007986 */
/* 0x000fe2000c101904 */
/*0980*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0990*/ BRA 0x990; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*09a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a00*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a10*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z11multMatCUDAPiS_S_
.globl _Z11multMatCUDAPiS_S_
.p2align 8
.type _Z11multMatCUDAPiS_S_,@function
_Z11multMatCUDAPiS_S_:
s_load_b32 s2, s[0:1], 0x24
v_bfe_u32 v2, v0, 10, 10
v_and_b32_e32 v3, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_lshr_b32 s3, s2, 16
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[0:1], null, s15, s3, v[2:3]
v_mad_u64_u32 v[1:2], null, s14, s2, v[3:4]
s_mov_b32 s2, exec_lo
v_max_i32_e32 v2, v0, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e32 0x3e8, v2
s_cbranch_execz .LBB0_4
s_load_b128 s[4:7], s[0:1], 0x0
v_mul_lo_u32 v2, v0, 0x3e8
v_mov_b32_e32 v4, 0
s_mov_b64 s[2:3], 0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[2:3], 2, v[2:3]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v7, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v8, vcc_lo, s5, v3, vcc_lo
v_mov_b32_e32 v2, v1
.p2align 6
.LBB0_2:
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
v_ashrrev_i32_e32 v3, 31, v2
v_add_co_u32 v5, vcc_lo, v7, s2
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_ci_u32_e32 v6, vcc_lo, s3, v8, vcc_lo
v_lshlrev_b64 v[9:10], 2, v[2:3]
v_add_nc_u32_e32 v2, 0x3e8, v2
s_add_u32 s2, s2, 4
s_addc_u32 s3, s3, 0
s_cmpk_eq_i32 s2, 0xfa0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_u32 v9, vcc_lo, s6, v9
v_add_co_ci_u32_e32 v10, vcc_lo, s7, v10, vcc_lo
global_load_b32 v3, v[5:6], off
global_load_b32 v9, v[9:10], off
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[5:6], null, v9, v3, v[4:5]
s_delay_alu instid0(VALU_DEP_1)
v_mov_b32_e32 v4, v5
s_cbranch_scc0 .LBB0_2
s_load_b64 s[0:1], s[0:1], 0x10
v_mad_u64_u32 v[2:3], null, v0, 0x3e8, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v5, off
.LBB0_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z11multMatCUDAPiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 11
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z11multMatCUDAPiS_S_, .Lfunc_end0-_Z11multMatCUDAPiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z11multMatCUDAPiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z11multMatCUDAPiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 11
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00189f0b_00000000-6_matMult.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3675:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3675:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z7multMatPiS_S_
.type _Z7multMatPiS_S_, @function
_Z7multMatPiS_S_:
.LFB3669:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movq %rdi, %r12
movq %rsi, %rbp
movq %rdx, %rbx
movl $0, %r11d
.L4:
leaq 4000000(%rbp), %rdi
imulq $4000, %r11, %r9
leaq (%r12,%r9), %r10
addq %rbx, %r9
movl $0, %r8d
.L8:
leaq -4000000(%rdi), %rax
movq %r10, %rcx
movl $0, %esi
.L5:
movl (%rcx), %edx
imull (%rax), %edx
addl %edx, %esi
addq $4, %rcx
addq $4000, %rax
cmpq %rdi, %rax
jne .L5
movl %esi, (%r9,%r8,4)
addq $1, %r8
addq $4, %rdi
cmpq $1000, %r8
jne .L8
addq $1, %r11
cmpq $1000, %r11
jne .L4
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3669:
.size _Z7multMatPiS_S_, .-_Z7multMatPiS_S_
.globl _Z9compareToPiS_
.type _Z9compareToPiS_, @function
_Z9compareToPiS_:
.LFB3670:
.cfi_startproc
endbr64
movl $4000, %edx
.L12:
leaq -4000(%rdx), %rax
.L14:
movl (%rsi,%rax), %ecx
cmpl %ecx, (%rdi,%rax)
jne .L15
addq $4, %rax
cmpq %rdx, %rax
jne .L14
addq $4000, %rdx
cmpq $4004000, %rdx
jne .L12
movl $1, %eax
ret
.L15:
movl $0, %eax
ret
.cfi_endproc
.LFE3670:
.size _Z9compareToPiS_, .-_Z9compareToPiS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string " "
.text
.globl _Z11printMatrixPi
.type _Z11printMatrixPi, @function
_Z11printMatrixPi:
.LFB3671:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
movq %rdi, %r14
leaq 4000(%rdi), %rbp
addq $4004000, %r14
leaq _ZSt4cout(%rip), %r12
leaq .LC0(%rip), %r13
jmp .L19
.L27:
call _ZSt16__throw_bad_castv@PLT
.L28:
movzbl 67(%rbx), %eax
.L23:
movsbl %al, %esi
movq %r12, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
addq $4000, %rbp
cmpq %r14, %rbp
je .L18
.L19:
leaq -4000(%rbp), %rbx
.L20:
movl (%rbx), %esi
movq %r12, %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
movl $1, %edx
movq %r13, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L20
movq (%r12), %rax
movq -24(%rax), %rax
movq 240(%r12,%rax), %rbx
testq %rbx, %rbx
je .L27
cmpb $0, 56(%rbx)
jne .L28
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
jmp .L23
.L18:
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3671:
.size _Z11printMatrixPi, .-_Z11printMatrixPi
.globl _Z35__device_stub__Z11multMatCUDAPiS_S_PiS_S_
.type _Z35__device_stub__Z11multMatCUDAPiS_S_PiS_S_, @function
_Z35__device_stub__Z11multMatCUDAPiS_S_PiS_S_:
.LFB3697:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L33
.L29:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L34
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L33:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z11multMatCUDAPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L29
.L34:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3697:
.size _Z35__device_stub__Z11multMatCUDAPiS_S_PiS_S_, .-_Z35__device_stub__Z11multMatCUDAPiS_S_PiS_S_
.globl _Z11multMatCUDAPiS_S_
.type _Z11multMatCUDAPiS_S_, @function
_Z11multMatCUDAPiS_S_:
.LFB3698:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z11multMatCUDAPiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3698:
.size _Z11multMatCUDAPiS_S_, .-_Z11multMatCUDAPiS_S_
.section .rodata.str1.1
.LC2:
.string "Tiempo invertido CPU = %lf s\n"
.LC3:
.string "Tiempo invertido GPU = %lf s\n"
.LC4:
.string "Matrices Iguales"
.LC5:
.string "Matrices Diferentes"
.text
.globl main
.type main, @function
main:
.LFB3672:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $64, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $4000000, %edi
call malloc@PLT
movq %rax, %r12
movl $4000000, %edi
call malloc@PLT
movq %rax, %rbp
movl $4000000, %edi
call malloc@PLT
movq %rax, %rbx
movl $4000000, %edi
call malloc@PLT
movq %rax, %r13
movl $4000, %ecx
movl $1, %edx
.L38:
leal -1(%rdx), %esi
leaq -4000(%rcx), %rax
.L39:
movl %esi, (%r12,%rax)
movl %edx, 0(%rbp,%rax)
movl $0, (%rbx,%rax)
addq $4, %rax
cmpq %rcx, %rax
jne .L39
addl $1, %edx
addq $4000, %rcx
cmpl $1001, %edx
jne .L38
call clock@PLT
movq %rax, %r14
movq %rbx, %rdx
movq %rbp, %rsi
movq %r12, %rdi
call _Z7multMatPiS_S_
call clock@PLT
subq %r14, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
divsd .LC1(%rip), %xmm0
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
leaq 8(%rsp), %rdi
movl $4000000, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $4000000, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $4000000, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $4000000, %edx
movq %r12, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $4000000, %edx
movq %rbp, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $32, 32(%rsp)
movl $32, 36(%rsp)
movl $1, 40(%rsp)
movl $32, 44(%rsp)
movl $32, 48(%rsp)
movl $1, 52(%rsp)
call clock@PLT
movq %rax, %r14
movl 40(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 32(%rsp), %rdx
movq 44(%rsp), %rdi
movl 52(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L47
.L41:
movl $2, %ecx
movl $4000000, %edx
movq 24(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
call clock@PLT
subq %r14, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
divsd .LC1(%rip), %xmm0
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq %r13, %rsi
movq %rbx, %rdi
call _Z9compareToPiS_
testb %al, %al
je .L42
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L43:
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq %r12, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L48
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L47:
.cfi_restore_state
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z35__device_stub__Z11multMatCUDAPiS_S_PiS_S_
jmp .L41
.L42:
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L43
.L48:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3672:
.size main, .-main
.section .rodata.str1.1
.LC6:
.string "_Z11multMatCUDAPiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3700:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z11multMatCUDAPiS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3700:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC1:
.long 0
.long 1093567616
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "matMult.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z26__device_stub__multMatCUDAPiS_S_ # -- Begin function _Z26__device_stub__multMatCUDAPiS_S_
.p2align 4, 0x90
.type _Z26__device_stub__multMatCUDAPiS_S_,@function
_Z26__device_stub__multMatCUDAPiS_S_: # @_Z26__device_stub__multMatCUDAPiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z11multMatCUDAPiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z26__device_stub__multMatCUDAPiS_S_, .Lfunc_end0-_Z26__device_stub__multMatCUDAPiS_S_
.cfi_endproc
# -- End function
.globl _Z7multMatPiS_S_ # -- Begin function _Z7multMatPiS_S_
.p2align 4, 0x90
.type _Z7multMatPiS_S_,@function
_Z7multMatPiS_S_: # @_Z7multMatPiS_S_
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset %rbx, -24
.cfi_offset %rbp, -16
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # %.preheader19
# =>This Loop Header: Depth=1
# Child Loop BB1_2 Depth 2
# Child Loop BB1_3 Depth 3
imulq $4000, %rax, %rcx # imm = 0xFA0
addq %rdx, %rcx
movq %rsi, %r8
xorl %r9d, %r9d
.p2align 4, 0x90
.LBB1_2: # %.preheader
# Parent Loop BB1_1 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB1_3 Depth 3
xorl %r10d, %r10d
movq %r8, %r11
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_3: # Parent Loop BB1_1 Depth=1
# Parent Loop BB1_2 Depth=2
# => This Inner Loop Header: Depth=3
movl (%r11), %ebp
imull (%rdi,%r10,4), %ebp
addl %ebp, %ebx
incq %r10
addq $4000, %r11 # imm = 0xFA0
cmpq $1000, %r10 # imm = 0x3E8
jne .LBB1_3
# %bb.4: # in Loop: Header=BB1_2 Depth=2
movl %ebx, (%rcx,%r9,4)
incq %r9
addq $4, %r8
cmpq $1000, %r9 # imm = 0x3E8
jne .LBB1_2
# %bb.5: # in Loop: Header=BB1_1 Depth=1
incq %rax
addq $4000, %rdi # imm = 0xFA0
cmpq $1000, %rax # imm = 0x3E8
jne .LBB1_1
# %bb.6:
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z7multMatPiS_S_, .Lfunc_end1-_Z7multMatPiS_S_
.cfi_endproc
# -- End function
.globl _Z9compareToPiS_ # -- Begin function _Z9compareToPiS_
.p2align 4, 0x90
.type _Z9compareToPiS_,@function
_Z9compareToPiS_: # @_Z9compareToPiS_
.cfi_startproc
# %bb.0:
leaq 4(%rsi), %rcx
leaq 4(%rdi), %rdx
xorl %r8d, %r8d
xorl %eax, %eax
jmp .LBB2_1
.p2align 4, 0x90
.LBB2_6: # %.critedge
# in Loop: Header=BB2_1 Depth=1
cmpq $999, %r8 # imm = 0x3E7
leaq 1(%r8), %r9
setae %al
addq $4000, %rcx # imm = 0xFA0
addq $4000, %rdx # imm = 0xFA0
movq %r9, %r8
cmpq $1000, %r9 # imm = 0x3E8
je .LBB2_7
.LBB2_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB2_3 Depth 2
imulq $4000, %r8, %r9 # imm = 0xFA0
movl (%rdi,%r9), %r10d
cmpl (%rsi,%r9), %r10d
jne .LBB2_7
# %bb.2: # %.lr.ph.preheader
# in Loop: Header=BB2_1 Depth=1
movq $-1, %r9
.p2align 4, 0x90
.LBB2_3: # %.lr.ph
# Parent Loop BB2_1 Depth=1
# => This Inner Loop Header: Depth=2
cmpq $998, %r9 # imm = 0x3E6
je .LBB2_6
# %bb.4: # in Loop: Header=BB2_3 Depth=2
movl 4(%rdx,%r9,4), %r11d
leaq 1(%r9), %r10
cmpl 4(%rcx,%r9,4), %r11d
movq %r10, %r9
je .LBB2_3
# %bb.5: # %._crit_edge
# in Loop: Header=BB2_1 Depth=1
cmpq $999, %r10 # imm = 0x3E7
jae .LBB2_6
.LBB2_7: # %.critedge29
andb $1, %al
# kill: def $al killed $al killed $rax
retq
.Lfunc_end2:
.size _Z9compareToPiS_, .Lfunc_end2-_Z9compareToPiS_
.cfi_endproc
# -- End function
.globl _Z11printMatrixPi # -- Begin function _Z11printMatrixPi
.p2align 4, 0x90
.type _Z11printMatrixPi,@function
_Z11printMatrixPi: # @_Z11printMatrixPi
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
xorl %r15d, %r15d
jmp .LBB3_1
.p2align 4, 0x90
.LBB3_6: # in Loop: Header=BB3_1 Depth=1
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB3_7: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
# in Loop: Header=BB3_1 Depth=1
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
incq %r15
addq $4000, %rbx # imm = 0xFA0
cmpq $1000, %r15 # imm = 0x3E8
je .LBB3_8
.LBB3_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB3_2 Depth 2
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_2: # Parent Loop BB3_1 Depth=1
# => This Inner Loop Header: Depth=2
movl (%rbx,%r14,4), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movl $.L.str, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
incq %r14
cmpq $1000, %r14 # imm = 0x3E8
jne .LBB3_2
# %bb.3: # in Loop: Header=BB3_1 Depth=1
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r14
testq %r14, %r14
je .LBB3_9
# %bb.4: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
# in Loop: Header=BB3_1 Depth=1
cmpb $0, 56(%r14)
je .LBB3_6
# %bb.5: # in Loop: Header=BB3_1 Depth=1
movzbl 67(%r14), %eax
jmp .LBB3_7
.LBB3_8:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB3_9:
.cfi_def_cfa_offset 32
callq _ZSt16__throw_bad_castv
.Lfunc_end3:
.size _Z11printMatrixPi, .Lfunc_end3-_Z11printMatrixPi
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI4_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $120, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $4000000, %edi # imm = 0x3D0900
callq malloc
movq %rax, %rbx
movl $4000000, %edi # imm = 0x3D0900
callq malloc
movq %rax, %r14
movl $4000000, %edi # imm = 0x3D0900
callq malloc
movq %rax, %r15
movl $4000000, %edi # imm = 0x3D0900
callq malloc
movq %rax, %r12
xorl %r13d, %r13d
movl $4000000, %edx # imm = 0x3D0900
movq %r15, %rdi
xorl %esi, %esi
callq memset@PLT
movq %rbx, %rax
movq %r14, %rcx
.p2align 4, 0x90
.LBB4_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB4_2 Depth 2
movq %r13, %rdx
incq %r13
xorl %esi, %esi
.p2align 4, 0x90
.LBB4_2: # Parent Loop BB4_1 Depth=1
# => This Inner Loop Header: Depth=2
movl %edx, (%rax,%rsi,4)
movl %r13d, (%rcx,%rsi,4)
incq %rsi
cmpq $1000, %rsi # imm = 0x3E8
jne .LBB4_2
# %bb.3: # in Loop: Header=BB4_1 Depth=1
addq $4000, %rcx # imm = 0xFA0
addq $4000, %rax # imm = 0xFA0
cmpq $1000, %r13 # imm = 0x3E8
jne .LBB4_1
# %bb.4:
xorl %ebp, %ebp
callq clock
movq %rax, %r13
movq %rbx, %rax
.p2align 4, 0x90
.LBB4_5: # %.preheader19.i
# =>This Loop Header: Depth=1
# Child Loop BB4_6 Depth 2
# Child Loop BB4_7 Depth 3
imulq $4000, %rbp, %rcx # imm = 0xFA0
addq %r15, %rcx
movq %r14, %rdx
xorl %esi, %esi
.p2align 4, 0x90
.LBB4_6: # %.preheader.i
# Parent Loop BB4_5 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB4_7 Depth 3
xorl %edi, %edi
movq %rdx, %r8
xorl %r9d, %r9d
.p2align 4, 0x90
.LBB4_7: # Parent Loop BB4_5 Depth=1
# Parent Loop BB4_6 Depth=2
# => This Inner Loop Header: Depth=3
movl (%r8), %r10d
imull (%rax,%rdi,4), %r10d
addl %r10d, %r9d
incq %rdi
addq $4000, %r8 # imm = 0xFA0
cmpq $1000, %rdi # imm = 0x3E8
jne .LBB4_7
# %bb.8: # in Loop: Header=BB4_6 Depth=2
movl %r9d, (%rcx,%rsi,4)
incq %rsi
addq $4, %rdx
cmpq $1000, %rsi # imm = 0x3E8
jne .LBB4_6
# %bb.9: # in Loop: Header=BB4_5 Depth=1
incq %rbp
addq $4000, %rax # imm = 0xFA0
cmpq $1000, %rbp # imm = 0x3E8
jne .LBB4_5
# %bb.10: # %_Z7multMatPiS_S_.exit
callq clock
subq %r13, %rax
cvtsi2sd %rax, %xmm0
divsd .LCPI4_0(%rip), %xmm0
movl $.L.str.1, %edi
movb $1, %al
callq printf
leaq 16(%rsp), %rdi
movl $4000000, %esi # imm = 0x3D0900
callq hipMalloc
leaq 8(%rsp), %rdi
movl $4000000, %esi # imm = 0x3D0900
callq hipMalloc
movq %rsp, %rdi
movl $4000000, %esi # imm = 0x3D0900
callq hipMalloc
movq 16(%rsp), %rdi
movl $4000000, %edx # imm = 0x3D0900
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movl $4000000, %edx # imm = 0x3D0900
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
callq clock
movq %rax, %r13
movabsq $137438953504, %rdi # imm = 0x2000000020
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_12
# %bb.11:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z11multMatCUDAPiS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_12:
movq (%rsp), %rsi
movl $4000000, %edx # imm = 0x3D0900
movq %r12, %rdi
movl $2, %ecx
callq hipMemcpy
callq clock
subq %r13, %rax
xorps %xmm0, %xmm0
cvtsi2sd %rax, %xmm0
divsd .LCPI4_0(%rip), %xmm0
movl $.L.str.2, %edi
movb $1, %al
callq printf
movl (%r15), %eax
cmpl (%r12), %eax
jne .LBB4_21
# %bb.13: # %.lr.ph.preheader.preheader
movq %r12, %rax
addq $4, %rax
movq %r15, %rdx
addq $4, %rdx
xorl %ecx, %ecx
xorl %edi, %edi
.p2align 4, 0x90
.LBB4_15: # %.lr.ph.preheader
# =>This Loop Header: Depth=1
# Child Loop BB4_16 Depth 2
movq %rdi, %rsi
movq $-1, %rdi
.p2align 4, 0x90
.LBB4_16: # %.lr.ph
# Parent Loop BB4_15 Depth=1
# => This Inner Loop Header: Depth=2
cmpq $998, %rdi # imm = 0x3E6
je .LBB4_19
# %bb.17: # in Loop: Header=BB4_16 Depth=2
movl 4(%rdx,%rdi,4), %r9d
leaq 1(%rdi), %r8
cmpl 4(%rax,%rdi,4), %r9d
movq %r8, %rdi
je .LBB4_16
# %bb.18: # %._crit_edge
# in Loop: Header=BB4_15 Depth=1
cmpq $999, %r8 # imm = 0x3E7
jb .LBB4_20
.LBB4_19: # %.critedge.i
# in Loop: Header=BB4_15 Depth=1
leaq 1(%rsi), %rdi
cmpq $999, %rsi # imm = 0x3E7
setae %cl
cmpq $1000, %rdi # imm = 0x3E8
je .LBB4_20
# %bb.14: # %.preheader.i44
# in Loop: Header=BB4_15 Depth=1
imulq $4000, %rdi, %rsi # imm = 0xFA0
movl (%r15,%rsi), %r8d
addq $4000, %rax # imm = 0xFA0
addq $4000, %rdx # imm = 0xFA0
cmpl (%r12,%rsi), %r8d
je .LBB4_15
.LBB4_20: # %_Z9compareToPiS_.exit
movl $.L.str.3, %edi
testb $1, %cl
jne .LBB4_22
.LBB4_21: # %.critedge
movl $.L.str.4, %edi
.LBB4_22:
xorl %eax, %eax
callq printf
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
movq %r12, %rdi
callq free
xorl %eax, %eax
addq $120, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size main, .Lfunc_end4-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB5_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB5_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11multMatCUDAPiS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end5:
.size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB6_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB6_2:
retq
.Lfunc_end6:
.size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z11multMatCUDAPiS_S_,@object # @_Z11multMatCUDAPiS_S_
.section .rodata,"a",@progbits
.globl _Z11multMatCUDAPiS_S_
.p2align 3, 0x0
_Z11multMatCUDAPiS_S_:
.quad _Z26__device_stub__multMatCUDAPiS_S_
.size _Z11multMatCUDAPiS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz " "
.size .L.str, 2
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Tiempo invertido CPU = %lf s\n"
.size .L.str.1, 30
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Tiempo invertido GPU = %lf s\n"
.size .L.str.2, 30
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Matrices Iguales"
.size .L.str.3, 17
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Matrices Diferentes"
.size .L.str.4, 20
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z11multMatCUDAPiS_S_"
.size .L__unnamed_1, 22
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z26__device_stub__multMatCUDAPiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z11multMatCUDAPiS_S_
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void updateStatistic ( const int nwl, const float *stt1, const float *q, const float *r, float *stt0 ) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if ( i < nwl ) {
stt0[i] = ( q[i] > r[i] ) * stt1[i] + ( q[i] < r[i] ) * stt0[i];
}
} | code for sm_80
Function : _Z15updateStatisticiPKfS0_S0_Pf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e280000002100 */
/*0020*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e240000002500 */
/*0030*/ IMAD R6, R3, c[0x0][0x0], R6 ; /* 0x0000000003067a24 */
/* 0x001fca00078e0206 */
/*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x160], PT ; /* 0x0000580006007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x178] ; /* 0x00005e0006047625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x170] ; /* 0x00005c0006027625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea4000c1e1900 */
/*00b0*/ IMAD.WIDE R8, R6.reuse, R7.reuse, c[0x0][0x180] ; /* 0x0000600006087625 */
/* 0x0c0fe400078e0207 */
/*00c0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea4000c1e1900 */
/*00d0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x168] ; /* 0x00005a0006067625 */
/* 0x000fe400078e0207 */
/*00e0*/ LDG.E R11, [R8.64] ; /* 0x00000004080b7981 */
/* 0x000ee8000c1e1900 */
/*00f0*/ LDG.E R7, [R6.64] ; /* 0x0000000406077981 */
/* 0x000f22000c1e1900 */
/*0100*/ FSET.BF.LT.AND R10, R3, R4, PT ; /* 0x00000004030a720a */
/* 0x004fc40003801000 */
/*0110*/ FSET.BF.GT.AND R0, R3, R4, PT ; /* 0x000000040300720a */
/* 0x000fc60003804000 */
/*0120*/ FMUL R10, R10, R11 ; /* 0x0000000b0a0a7220 */
/* 0x008fc80000400000 */
/*0130*/ FFMA R11, R0, R7, R10 ; /* 0x00000007000b7223 */
/* 0x010fca000000000a */
/*0140*/ STG.E [R8.64], R11 ; /* 0x0000000b08007986 */
/* 0x000fe2000c101904 */
/*0150*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0160*/ BRA 0x160; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void updateStatistic ( const int nwl, const float *stt1, const float *q, const float *r, float *stt0 ) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if ( i < nwl ) {
stt0[i] = ( q[i] > r[i] ) * stt1[i] + ( q[i] < r[i] ) * stt0[i];
}
} | .file "tmpxft_000c7025_00000000-6_updateStatistic.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z45__device_stub__Z15updateStatisticiPKfS0_S0_PfiPKfS0_S0_Pf
.type _Z45__device_stub__Z15updateStatisticiPKfS0_S0_PfiPKfS0_S0_Pf, @function
_Z45__device_stub__Z15updateStatisticiPKfS0_S0_PfiPKfS0_S0_Pf:
.LFB2051:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movl %edi, 44(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movq %r8, 8(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 44(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 184
pushq 56(%rsp)
.cfi_def_cfa_offset 192
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z15updateStatisticiPKfS0_S0_Pf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z45__device_stub__Z15updateStatisticiPKfS0_S0_PfiPKfS0_S0_Pf, .-_Z45__device_stub__Z15updateStatisticiPKfS0_S0_PfiPKfS0_S0_Pf
.globl _Z15updateStatisticiPKfS0_S0_Pf
.type _Z15updateStatisticiPKfS0_S0_Pf, @function
_Z15updateStatisticiPKfS0_S0_Pf:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z45__device_stub__Z15updateStatisticiPKfS0_S0_PfiPKfS0_S0_Pf
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z15updateStatisticiPKfS0_S0_Pf, .-_Z15updateStatisticiPKfS0_S0_Pf
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z15updateStatisticiPKfS0_S0_Pf"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z15updateStatisticiPKfS0_S0_Pf(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void updateStatistic ( const int nwl, const float *stt1, const float *q, const float *r, float *stt0 ) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if ( i < nwl ) {
stt0[i] = ( q[i] > r[i] ) * stt1[i] + ( q[i] < r[i] ) * stt0[i];
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void updateStatistic ( const int nwl, const float *stt1, const float *q, const float *r, float *stt0 ) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if ( i < nwl ) {
stt0[i] = ( q[i] > r[i] ) * stt1[i] + ( q[i] < r[i] ) * stt0[i];
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.