system stringclasses 7 values | input stringlengths 10 94.5k | output stringlengths 14 55.5k |
|---|---|---|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include<stdio.h>
int main(){
printf("Hello World!!!");
return 0;
} | .file "tmpxft_00168bba_00000000-6_hello.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Hello World!!!"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include<stdio.h>
int main(){
printf("Hello World!!!");
return 0;
} | #include <hip/hip_runtime.h>
#include<stdio.h>
int main(){
printf("Hello World!!!");
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include<stdio.h>
int main(){
printf("Hello World!!!");
return 0;
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include<stdio.h>
int main(){
printf("Hello World!!!");
return 0;
} | .text
.file "hello.hip"
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
movl $.L.str, %edi
xorl %eax, %eax
callq printf
xorl %eax, %eax
popq %rcx
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Hello World!!!"
.size .L.str, 15
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00168bba_00000000-6_hello.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Hello World!!!"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "hello.hip"
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
movl $.L.str, %edi
xorl %eax, %eax
callq printf
xorl %eax, %eax
popq %rcx
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Hello World!!!"
.size .L.str, 15
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cuda_runtime.h>
#include <cuda.h>
#include <curand.h>
#include <cuda_runtime_api.h>
#include <device_functions.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include<sys/time.h>
#include <sstream>
#include <iostream>
#include <fstream>
#include <iostream>
#include <stdio.h>
// printf() is only supported
// for devices of compute capability 2.0 and higher
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
#define printf(f, ...) ((void)(f, __VA_ARGS__),0)
#endif
#define CURAND_CALL(x) do { if((x)!=CURAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
/// The number of points to generate within 0,1
#define NUM_BOIDS 1000
#define NUM_FRAMES 150
__device__ float vectorMag_kernal(float _vector1, float _vector2, float _vector3)
{
float mag;
mag = sqrtf((_vector1*_vector1) + (_vector2*_vector2) + (_vector3*_vector3));
return mag;
}
__device__ void steerBoid_kernal(float * _targetX, float * _targetZ, float * _currentX, float * _currentZ, float * _sourceX, float *_sourceZ)
{
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
float steeringForce = 0.02;
float diffX[NUM_BOIDS];
float diffZ[NUM_BOIDS];
diffX[idx] = _targetX[idx] - _currentX[idx];
diffZ[idx] = _targetZ[idx] - _currentZ[idx];
_sourceX[idx] =( (diffX[idx]/vectorMag_kernal(diffX[idx], 0, diffZ[idx]))*steeringForce);
_sourceZ[idx] =( (diffZ[idx]/vectorMag_kernal(diffX[idx], 0, diffZ[idx]))*steeringForce);
}
__global__ void avoidBoundaries_kernal(float * _posx, float * _posz, float * _velx, float * _velz, int _noBoids)
{
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
float desiredVelX[NUM_BOIDS];
float desiredVelZ[NUM_BOIDS];
float * desiredVelX_ptr = &desiredVelX[0];
float * desiredVelZ_ptr = &desiredVelZ[0];
if(idx<_noBoids)
{
if(_posz[idx] >= 2 && _velz[idx] >0)
{
desiredVelX[idx] = _velx[idx];
desiredVelZ[idx] = -_velz[idx];
steerBoid_kernal(desiredVelX_ptr, desiredVelZ_ptr, _velx, _velz, _velx, _velz);
// _velz[idx] = -_velz[idx];
}
else if(_posz[idx] <= -2 && _velz[idx] <0)
{
desiredVelX[idx] = _velx[idx];
desiredVelZ[idx] = -_velz[idx];
steerBoid_kernal(desiredVelX_ptr, desiredVelZ_ptr, _velx, _velz, _velx, _velz);
//_velz[idx] = -_velz[idx];
}
else if(_posx[idx] >= 2 && _velx[idx] >0)
{
desiredVelX[idx] = -_velx[idx];
desiredVelZ[idx] = _velz[idx];
steerBoid_kernal(desiredVelX_ptr, desiredVelZ_ptr, _velx, _velz, _velx, _velz);
//_velx[idx] = -_velx[idx];
}
else if(_posx[idx] <= -2 && _velx[idx] <0)
{
desiredVelX[idx] = -_velx[idx];
desiredVelZ[idx] = _velz[idx];
steerBoid_kernal(desiredVelX_ptr, desiredVelZ_ptr, _velx, _velz, _velx, _velz);
//_velx[idx] = -_velx[idx];
}
}
}
__global__ void updatePos_kernal(float * _posx, float * _posz, float * _velx, float * _velz, int _noBoids)
{
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx< _noBoids)
{
_posx[idx] += _velx[idx];
_posz[idx] += _velz[idx];
}
}
__device__ float distance_kernal(float _posx, float _posz, float _otherPosx, float _otherPosz)
{
float distance = sqrtf(((_posx-_otherPosx)*(_posx-_otherPosx)) + ((_posz-_otherPosz)*(_posz-_otherPosz)));
return distance;
}
__global__ void limitVel_kernal(float _limit, float * _posx, float * _posz, float * _velx, float * _velz, const int _noBoids)
{
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
float mag[NUM_BOIDS];
if(idx < _noBoids)
{
mag[idx] = sqrtf((_velx[idx]*_velx[idx]) + (_velz[idx]*_velz[idx]));
if( mag[idx] > _limit)
{
_velx[idx] = (_velx[idx]/mag[idx])*_limit;
_velz[idx] = (_velz[idx]/mag[idx])*_limit;
}
}
}
__device__ void alignment_kernal(float * _alignmentVectorX, float * _alignmentVectorZ, float * _posx, float * _posz, float * _velx, float * _velz, int _numBoids)
{
int const noBoids = _numBoids;
__shared__ unsigned int numberOfNeighbours[NUM_BOIDS];
float tmpX[NUM_BOIDS];
float tmpZ[NUM_BOIDS];
float mag[NUM_BOIDS];
// current boid whos neighbours were looking for
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
// neighbours of current boid
uint idy = blockIdx.y * blockDim.y + threadIdx.y;
if(idx < noBoids && idy < noBoids)
{
// reset values
numberOfNeighbours[idx] = 0;
// wait for threads to sync
__syncthreads();
if(idx != idy)
{
if(distance_kernal(_posx[idx], _posz[idx], _posx[idy], _posz[idy]) < 0.15)
{
atomicAdd(&(_alignmentVectorX[idx]), _velx[idy]);
atomicAdd(&(_alignmentVectorZ[idx]), _velz[idy]);
atomicAdd(&numberOfNeighbours[idx], 1);
}
}
}
// wait for threads to sync
__syncthreads();
//limit to 1D
if(idy == 0 && idx< noBoids)
{
//avoid dividing by zero
if(numberOfNeighbours[idx] > 0)
{
//find average position
_alignmentVectorX[idx] = _alignmentVectorX[idx]/numberOfNeighbours[idx];
_alignmentVectorZ[idx] = _alignmentVectorZ[idx]/numberOfNeighbours[idx];
// normalize
mag[idx] = norm3d(_alignmentVectorX[idx], 0.0f, _alignmentVectorZ[idx]);
if(mag[idx] > 0)
{
_alignmentVectorX[idx] = (_alignmentVectorX[idx] / mag[idx]);
_alignmentVectorZ[idx] = (_alignmentVectorZ[idx] / mag[idx]);
}
//steer
steerBoid_kernal(_alignmentVectorX, _alignmentVectorZ, _velx, _velz, _alignmentVectorX, _alignmentVectorZ);
}
}
}
__device__ void seperation_kernal(float * _seperationVectorX, float * _seperationVectorZ, float * _posx, float * _posz, float * _velx, float * _velz, int _numBoids)
{
int const noBoids = _numBoids;
__shared__ float _diffVectorX[NUM_BOIDS];
__shared__ float _diffVectorZ[NUM_BOIDS];
__shared__ unsigned int numberOfNeighbours[NUM_BOIDS];
float tmpX[NUM_BOIDS];
float tmpZ[NUM_BOIDS];
float mag[NUM_BOIDS];
// current boid whos neighbours were looking for
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
// neighbours of current boid
uint idy = blockIdx.y * blockDim.y + threadIdx.y;
if(idx < noBoids && idy < noBoids)
{
// reset values
numberOfNeighbours[idx] = 0;
_diffVectorX[idx] = 0;
_diffVectorZ[idx] = 0;
// wait for threads to sync
__syncthreads();
if(idx != idy)
{
if(distance_kernal(_posx[idx], _posz[idx], _posx[idy], _posz[idy]) < 0.1)
{
atomicAdd(&(_diffVectorX[idx]), (_posx[idy]-_posx[idx]));
atomicAdd(&(_diffVectorZ[idx]), (_posz[idy]-_posz[idx]));
// normalise (make atomic)
//_diffVectorX[idx] = _diffVectorX[idx] / norm3d(_diffVectorX[idx], 0.0f, _diffVectorZ[idx]);
//_diffVectorZ[idx] = _diffVectorZ[idx] / norm3d(_diffVectorX[idx], 0.0f, _diffVectorZ[idx]);
// add neighbours position to current boids part of the seperation vector
atomicAdd(&(_seperationVectorX[idx]), _diffVectorX[idx]);
atomicAdd(&(_seperationVectorZ[idx]), _diffVectorZ[idx]);
atomicAdd(&numberOfNeighbours[idx], 1);
}
}
}
// wait for threads to sync
__syncthreads();
//limit to 1D
if(idy == 0 && idx< noBoids)
{
//avoid dividing by zero
if(numberOfNeighbours[idx] > 0)
{
// tmpX[idx] = _seperationVectorX[idx]/numberOfNeighbours[idx];
//tmpZ[idx] = _seperationVectorZ[idx]/numberOfNeighbours[idx];
//find average position
_seperationVectorX[idx] = _seperationVectorX[idx]/numberOfNeighbours[idx];
_seperationVectorZ[idx] = _seperationVectorZ[idx]/numberOfNeighbours[idx];
_seperationVectorX[idx] = ( _seperationVectorX[idx] * -1);
_seperationVectorZ[idx] = ( _seperationVectorZ[idx] * -1);
mag[idx] = norm3d(_seperationVectorX[idx], 0.0f, _seperationVectorZ[idx]);
if(mag[idx]>0)
{
_seperationVectorX[idx] = (_seperationVectorX[idx] / mag[idx]);
_seperationVectorZ[idx] = (_seperationVectorZ[idx] / mag[idx]);
}
steerBoid_kernal(_seperationVectorX, _seperationVectorZ, _velx, _velz, _seperationVectorX, _seperationVectorZ);
}
}
}
__device__ void cohesion_kernal(float * _cohesionVectorX, float * _cohesionVectorZ, float * _posx, float * _posz, float * _velx, float * _velz, int _numBoids)
{
int const noBoids = _numBoids;
__shared__ unsigned int numberOfNeighbours[NUM_BOIDS];
float mag[NUM_BOIDS];
// current boid whos neighbours were looking for
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
// neighbours of current boid
uint idy = blockIdx.y * blockDim.y + threadIdx.y;
if(idx < noBoids && idy < noBoids)
{
// reset values
numberOfNeighbours[idx] = 0;
_cohesionVectorX[idx] = 0;
_cohesionVectorZ[idx] = 0;
// wait for threads to sync
__syncthreads();
if(idx != idy)
{
if(distance_kernal(_posx[idx], _posz[idx], _posx[idy], _posz[idy]) < 0.2)
{
// add neighbours position to current boids part of the cohesion vector
atomicAdd(&(_cohesionVectorX[idx]), _posx[idy]);
atomicAdd(&(_cohesionVectorZ[idx]), _posz[idy]);
atomicAdd(&numberOfNeighbours[idx], 1);
}
}
}
// wait for threads to sync
__syncthreads();
//limit to 1D
if(idy == 0 && idx< noBoids)
{
//avoid dividing by zero
if(numberOfNeighbours[idx] > 0)
{
float tmpX = _cohesionVectorX[idx]/numberOfNeighbours[idx];
float tmpZ = _cohesionVectorZ[idx]/numberOfNeighbours[idx];
//find average position
_cohesionVectorX[idx] = tmpX;
_cohesionVectorZ[idx] = tmpZ;
_cohesionVectorX[idx] = ( _cohesionVectorX[idx] - _posx[idx]);
_cohesionVectorZ[idx] = ( _cohesionVectorZ[idx] - _posz[idx]);
mag[idx] = norm3d(_cohesionVectorX[idx], 0.0f, _cohesionVectorZ[idx]);
if(mag[idx] > 0)
{
_cohesionVectorX[idx] = (_cohesionVectorX[idx] / mag[idx]);
_cohesionVectorZ[idx] = (_cohesionVectorZ[idx] / mag[idx]);
}
steerBoid_kernal(_cohesionVectorX, _cohesionVectorZ, _velx, _velz, _cohesionVectorX, _cohesionVectorZ);
}
}
}
__global__ void flock_kernal(float * _cohesionVectorX, float * _cohesionVectorZ,float * _seperationVectorX, float * _seperationVectorZ, float * _alignmentVectorX, float * _alignmentVectorZ, float * _posx, float * _posz, float * _velx, float * _velz, int _numBoids)
{
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
uint idy = blockIdx.y * blockDim.y + threadIdx.y;
float mag[NUM_BOIDS];
if( idx <_numBoids)
{
// calculate cohesion
cohesion_kernal(_cohesionVectorX, _cohesionVectorZ, _posx, _posz, _velx, _velz, _numBoids);
seperation_kernal(_seperationVectorX, _seperationVectorZ, _posx, _posz, _velx, _velz, _numBoids);
alignment_kernal(_alignmentVectorX, _alignmentVectorZ, _posx, _posz, _velx, _velz, _numBoids);
// wait for threads to sync (dont add cohesion vector until calculated)
__syncthreads();
if(idy == 0)
{
_velx[idx]+= _cohesionVectorX[idx] + _seperationVectorX[idx] + _alignmentVectorX[idx];
_velz[idx]+= _cohesionVectorZ[idx] + _seperationVectorZ[idx] + _alignmentVectorZ[idx];
}
}
}
void dumpGeo(uint _frameNumber, thrust::device_vector <float> _posX, thrust::device_vector <float> _posZ)
{
char fname[150];
std::sprintf(fname,"geo/flock_gpu.%03d.geo",++_frameNumber);
// we will use a stringstream as it may be more efficient
std::stringstream ss;
std::ofstream file;
file.open(fname);
if (!file.is_open())
{
std::cerr << "failed to Open file "<<fname<<'\n';
exit(EXIT_FAILURE);
}
// write header see here http://www.sidefx.com/docs/houdini15.0/io/formats/geo
ss << "PGEOMETRY V5\n";
ss << "NPoints " << NUM_BOIDS << " NPrims 1\n";
ss << "NPointGroups 0 NPrimGroups 1\n";
// this is hard coded but could be flexible we have 1 attrib which is Colour
ss << "NPointAttrib 1 NVertexAttrib 0 NPrimAttrib 2 NAttrib 0\n";
// now write out our point attrib this case Cd for diffuse colour
ss <<"PointAttrib \n";
// default the colour to white
ss <<"Cd 3 float 1 1 1\n";
// now we write out the particle data in the format
// x y z 1 (attrib so in this case colour)
for(unsigned int i=0; i<NUM_BOIDS; ++i)
{
ss<<_posX[i]<<" "<<0<<" "<<_posZ[i] << " 1 ";
ss<<"("<<std::abs(1)<<" "<<std::abs(1)<<" "<<std::abs(1)<<")\n";
}
// now write out the index values
ss<<"PrimitiveAttrib\n";
ss<<"generator 1 index 1 location1\n";
ss<<"dopobject 1 index 1 /obj/AutoDopNetwork:1\n";
ss<<"Part "<<NUM_BOIDS<<" ";
for(size_t i=0; i<NUM_BOIDS; ++i)
{
ss<<i<<" ";
}
ss<<" [0 0]\n";
ss<<"box_object1 unordered\n";
ss<<"1 1\n";
ss<<"beginExtra\n";
ss<<"endExtra\n";
// dump string stream to disk;
file<<ss.rdbuf();
file.close();
}
/**
* Fill an array with random floats using the CURAND function.
* \param devData The chunk of memory you want to fill with floats within the range (0,1]
* \param n The size of the chunk of data
* \author Richard Southern
*/
int randFloats(float *&devData, const size_t n) {
// The generator, used for random numbers
curandGenerator_t gen;
// Create pseudo-random number generator
CURAND_CALL(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT));
// Set seed to be the current time (note that calls close together will have same seed!)
CURAND_CALL(curandSetPseudoRandomGeneratorSeed(gen, time(NULL)));
// Generate n floats on device
CURAND_CALL(curandGenerateUniform(gen, devData, n));
// Cleanup
CURAND_CALL(curandDestroyGenerator(gen));
return EXIT_SUCCESS;
}
int main()
{
// stores point pos
thrust::device_vector<float> m_dBoidsPosX;
thrust::device_vector<float> m_dBoidsPosZ;
// stores point pos
thrust::device_vector<float> m_dBoidsVelX;
thrust::device_vector<float> m_dBoidsVelZ;
// stores flocking vectors
thrust::device_vector<float> m_dCohesionX;
thrust::device_vector<float> m_dCohesionZ;
thrust::device_vector<float> m_dSeperationX;
thrust::device_vector<float> m_dSeperationZ;
thrust::device_vector<float> m_dAlignmentX;
thrust::device_vector<float> m_dAlignmentZ;
//thrust::device_vector<float> d_Pos(NUM_BOIDS*3);
// cant set size when constructing as member variable so resize here instead
m_dBoidsPosX.resize(NUM_BOIDS);
m_dBoidsPosZ.resize(NUM_BOIDS);
m_dBoidsVelX.resize(NUM_BOIDS);
m_dBoidsVelZ.resize(NUM_BOIDS);
m_dCohesionX.resize(NUM_BOIDS);
m_dCohesionZ.resize(NUM_BOIDS);
m_dSeperationX.resize(NUM_BOIDS);
m_dSeperationZ.resize(NUM_BOIDS);
m_dAlignmentX.resize(NUM_BOIDS);
m_dAlignmentZ.resize(NUM_BOIDS);
// fill vector with random values for pos
thrust::device_vector <float> tmp_PosPnts(NUM_BOIDS*4);
float * tmp_PosPnts_ptr = thrust::raw_pointer_cast(&tmp_PosPnts[0]);
randFloats(tmp_PosPnts_ptr, NUM_BOIDS*4);
// give random start positions
m_dBoidsPosX.assign(tmp_PosPnts.begin(), tmp_PosPnts.begin() + NUM_BOIDS);
m_dBoidsPosZ.assign(tmp_PosPnts.begin() + NUM_BOIDS, tmp_PosPnts.begin() + 2*NUM_BOIDS);
// give random start vel
m_dBoidsVelX.assign(tmp_PosPnts.begin() + 2*NUM_BOIDS, tmp_PosPnts.begin() + 3*NUM_BOIDS);
m_dBoidsVelZ.assign(tmp_PosPnts.begin() + 3*NUM_BOIDS, tmp_PosPnts.begin() + 4*NUM_BOIDS);
// create pointers pointing to the device vectors
float * m_dBoidsPosX_ptr= thrust::raw_pointer_cast(&m_dBoidsPosX[0]);
float * m_dBoidsPosZ_ptr= thrust::raw_pointer_cast(&m_dBoidsPosZ[0]);
float * m_dBoidsVelX_ptr= thrust::raw_pointer_cast(&m_dBoidsVelX[0]);
float * m_dBoidsVelZ_ptr= thrust::raw_pointer_cast(&m_dBoidsVelZ[0]);
float * m_dCohesionX_ptr= thrust::raw_pointer_cast(&m_dCohesionX[0]);
float * m_dCohesionZ_ptr= thrust::raw_pointer_cast(&m_dCohesionZ[0]);
float * m_dSeperationX_ptr= thrust::raw_pointer_cast(&m_dSeperationX[0]);
float * m_dSeperationZ_ptr= thrust::raw_pointer_cast(&m_dSeperationZ[0]);
float * m_dAlignmentX_ptr= thrust::raw_pointer_cast(&m_dAlignmentX[0]);
float * m_dAlignmentZ_ptr= thrust::raw_pointer_cast(&m_dAlignmentZ[0]);
//UPDATE-----------------------------------------------------------------------------
unsigned int nThreads = 1024;
unsigned int nBlocks = NUM_BOIDS/ nThreads + 1;
//thrust::device_vector<unsigned int> d_numNeighbourBoids(GRID_RESOLUTION*GRID_RESOLUTION, NULL_CELL);
//unsigned int * d_numNeighbourBoids_ptr = thrust::raw_pointer_cast(&d_numNeighbourBoids[0]);
// for nearest neighbour
unsigned int blockN = NUM_BOIDS / 32 + 1;
dim3 block2(32, 32); // block of (X,Y) threads
dim3 grid2(blockN, 1); // grid blockN * blockN blocks
for(int i = 0; i<NUM_FRAMES; i++)
{
// reset vectors
thrust::fill(m_dCohesionX.begin(), m_dCohesionX.begin() + NUM_BOIDS, 0);
thrust::fill(m_dCohesionZ.begin(), m_dCohesionZ.begin() + NUM_BOIDS, 0);
thrust::fill(m_dSeperationX.begin(), m_dSeperationX.begin() + NUM_BOIDS, 0);
thrust::fill(m_dSeperationZ.begin(), m_dSeperationZ.begin() + NUM_BOIDS, 0);
thrust::fill(m_dAlignmentX.begin(), m_dAlignmentX.begin() + NUM_BOIDS, 0);
thrust::fill(m_dAlignmentZ.begin(), m_dAlignmentZ.begin() + NUM_BOIDS, 0);
flock_kernal<<<grid2,block2>>>(m_dCohesionX_ptr, m_dCohesionZ_ptr, m_dSeperationX_ptr, m_dSeperationZ_ptr, m_dAlignmentX_ptr, m_dAlignmentZ_ptr, m_dBoidsPosX_ptr, m_dBoidsPosZ_ptr, m_dBoidsVelX_ptr, m_dBoidsVelZ_ptr, NUM_BOIDS);
cudaThreadSynchronize();
limitVel_kernal<<<nBlocks,nThreads>>>(0.02, m_dBoidsPosX_ptr, m_dBoidsPosZ_ptr, m_dBoidsVelX_ptr, m_dBoidsVelZ_ptr, NUM_BOIDS);
cudaThreadSynchronize();
avoidBoundaries_kernal<<<nBlocks,1024>>>(m_dBoidsPosX_ptr, m_dBoidsPosZ_ptr, m_dBoidsVelX_ptr, m_dBoidsVelZ_ptr, NUM_BOIDS);
cudaThreadSynchronize();
updatePos_kernal<<<nBlocks,1024>>>(m_dBoidsPosX_ptr, m_dBoidsPosZ_ptr, m_dBoidsVelX_ptr, m_dBoidsVelZ_ptr, NUM_BOIDS);
cudaThreadSynchronize();
dumpGeo(i, m_dBoidsPosX, m_dBoidsPosZ);
}
} | #include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hip/hip_runtime_api.h>
#include <hip/device_functions.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include<sys/time.h>
#include <sstream>
#include <iostream>
#include <fstream>
#include <iostream>
#include <stdio.h>
// printf() is only supported
// for devices of compute capability 2.0 and higher
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
#define printf(f, ...) ((void)(f, __VA_ARGS__),0)
#endif
#define CURAND_CALL(x) do { if((x)!=HIPRAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
/// The number of points to generate within 0,1
#define NUM_BOIDS 1000
#define NUM_FRAMES 150
__device__ float vectorMag_kernal(float _vector1, float _vector2, float _vector3)
{
float mag;
mag = sqrtf((_vector1*_vector1) + (_vector2*_vector2) + (_vector3*_vector3));
return mag;
}
__device__ void steerBoid_kernal(float * _targetX, float * _targetZ, float * _currentX, float * _currentZ, float * _sourceX, float *_sourceZ)
{
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
float steeringForce = 0.02;
float diffX[NUM_BOIDS];
float diffZ[NUM_BOIDS];
diffX[idx] = _targetX[idx] - _currentX[idx];
diffZ[idx] = _targetZ[idx] - _currentZ[idx];
_sourceX[idx] =( (diffX[idx]/vectorMag_kernal(diffX[idx], 0, diffZ[idx]))*steeringForce);
_sourceZ[idx] =( (diffZ[idx]/vectorMag_kernal(diffX[idx], 0, diffZ[idx]))*steeringForce);
}
__global__ void avoidBoundaries_kernal(float * _posx, float * _posz, float * _velx, float * _velz, int _noBoids)
{
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
float desiredVelX[NUM_BOIDS];
float desiredVelZ[NUM_BOIDS];
float * desiredVelX_ptr = &desiredVelX[0];
float * desiredVelZ_ptr = &desiredVelZ[0];
if(idx<_noBoids)
{
if(_posz[idx] >= 2 && _velz[idx] >0)
{
desiredVelX[idx] = _velx[idx];
desiredVelZ[idx] = -_velz[idx];
steerBoid_kernal(desiredVelX_ptr, desiredVelZ_ptr, _velx, _velz, _velx, _velz);
// _velz[idx] = -_velz[idx];
}
else if(_posz[idx] <= -2 && _velz[idx] <0)
{
desiredVelX[idx] = _velx[idx];
desiredVelZ[idx] = -_velz[idx];
steerBoid_kernal(desiredVelX_ptr, desiredVelZ_ptr, _velx, _velz, _velx, _velz);
//_velz[idx] = -_velz[idx];
}
else if(_posx[idx] >= 2 && _velx[idx] >0)
{
desiredVelX[idx] = -_velx[idx];
desiredVelZ[idx] = _velz[idx];
steerBoid_kernal(desiredVelX_ptr, desiredVelZ_ptr, _velx, _velz, _velx, _velz);
//_velx[idx] = -_velx[idx];
}
else if(_posx[idx] <= -2 && _velx[idx] <0)
{
desiredVelX[idx] = -_velx[idx];
desiredVelZ[idx] = _velz[idx];
steerBoid_kernal(desiredVelX_ptr, desiredVelZ_ptr, _velx, _velz, _velx, _velz);
//_velx[idx] = -_velx[idx];
}
}
}
__global__ void updatePos_kernal(float * _posx, float * _posz, float * _velx, float * _velz, int _noBoids)
{
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx< _noBoids)
{
_posx[idx] += _velx[idx];
_posz[idx] += _velz[idx];
}
}
__device__ float distance_kernal(float _posx, float _posz, float _otherPosx, float _otherPosz)
{
float distance = sqrtf(((_posx-_otherPosx)*(_posx-_otherPosx)) + ((_posz-_otherPosz)*(_posz-_otherPosz)));
return distance;
}
__global__ void limitVel_kernal(float _limit, float * _posx, float * _posz, float * _velx, float * _velz, const int _noBoids)
{
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
float mag[NUM_BOIDS];
if(idx < _noBoids)
{
mag[idx] = sqrtf((_velx[idx]*_velx[idx]) + (_velz[idx]*_velz[idx]));
if( mag[idx] > _limit)
{
_velx[idx] = (_velx[idx]/mag[idx])*_limit;
_velz[idx] = (_velz[idx]/mag[idx])*_limit;
}
}
}
__device__ void alignment_kernal(float * _alignmentVectorX, float * _alignmentVectorZ, float * _posx, float * _posz, float * _velx, float * _velz, int _numBoids)
{
int const noBoids = _numBoids;
__shared__ unsigned int numberOfNeighbours[NUM_BOIDS];
float tmpX[NUM_BOIDS];
float tmpZ[NUM_BOIDS];
float mag[NUM_BOIDS];
// current boid whos neighbours were looking for
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
// neighbours of current boid
uint idy = blockIdx.y * blockDim.y + threadIdx.y;
if(idx < noBoids && idy < noBoids)
{
// reset values
numberOfNeighbours[idx] = 0;
// wait for threads to sync
__syncthreads();
if(idx != idy)
{
if(distance_kernal(_posx[idx], _posz[idx], _posx[idy], _posz[idy]) < 0.15)
{
atomicAdd(&(_alignmentVectorX[idx]), _velx[idy]);
atomicAdd(&(_alignmentVectorZ[idx]), _velz[idy]);
atomicAdd(&numberOfNeighbours[idx], 1);
}
}
}
// wait for threads to sync
__syncthreads();
//limit to 1D
if(idy == 0 && idx< noBoids)
{
//avoid dividing by zero
if(numberOfNeighbours[idx] > 0)
{
//find average position
_alignmentVectorX[idx] = _alignmentVectorX[idx]/numberOfNeighbours[idx];
_alignmentVectorZ[idx] = _alignmentVectorZ[idx]/numberOfNeighbours[idx];
// normalize
mag[idx] = norm3d(_alignmentVectorX[idx], 0.0f, _alignmentVectorZ[idx]);
if(mag[idx] > 0)
{
_alignmentVectorX[idx] = (_alignmentVectorX[idx] / mag[idx]);
_alignmentVectorZ[idx] = (_alignmentVectorZ[idx] / mag[idx]);
}
//steer
steerBoid_kernal(_alignmentVectorX, _alignmentVectorZ, _velx, _velz, _alignmentVectorX, _alignmentVectorZ);
}
}
}
__device__ void seperation_kernal(float * _seperationVectorX, float * _seperationVectorZ, float * _posx, float * _posz, float * _velx, float * _velz, int _numBoids)
{
int const noBoids = _numBoids;
__shared__ float _diffVectorX[NUM_BOIDS];
__shared__ float _diffVectorZ[NUM_BOIDS];
__shared__ unsigned int numberOfNeighbours[NUM_BOIDS];
float tmpX[NUM_BOIDS];
float tmpZ[NUM_BOIDS];
float mag[NUM_BOIDS];
// current boid whos neighbours were looking for
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
// neighbours of current boid
uint idy = blockIdx.y * blockDim.y + threadIdx.y;
if(idx < noBoids && idy < noBoids)
{
// reset values
numberOfNeighbours[idx] = 0;
_diffVectorX[idx] = 0;
_diffVectorZ[idx] = 0;
// wait for threads to sync
__syncthreads();
if(idx != idy)
{
if(distance_kernal(_posx[idx], _posz[idx], _posx[idy], _posz[idy]) < 0.1)
{
atomicAdd(&(_diffVectorX[idx]), (_posx[idy]-_posx[idx]));
atomicAdd(&(_diffVectorZ[idx]), (_posz[idy]-_posz[idx]));
// normalise (make atomic)
//_diffVectorX[idx] = _diffVectorX[idx] / norm3d(_diffVectorX[idx], 0.0f, _diffVectorZ[idx]);
//_diffVectorZ[idx] = _diffVectorZ[idx] / norm3d(_diffVectorX[idx], 0.0f, _diffVectorZ[idx]);
// add neighbours position to current boids part of the seperation vector
atomicAdd(&(_seperationVectorX[idx]), _diffVectorX[idx]);
atomicAdd(&(_seperationVectorZ[idx]), _diffVectorZ[idx]);
atomicAdd(&numberOfNeighbours[idx], 1);
}
}
}
// wait for threads to sync
__syncthreads();
//limit to 1D
if(idy == 0 && idx< noBoids)
{
//avoid dividing by zero
if(numberOfNeighbours[idx] > 0)
{
// tmpX[idx] = _seperationVectorX[idx]/numberOfNeighbours[idx];
//tmpZ[idx] = _seperationVectorZ[idx]/numberOfNeighbours[idx];
//find average position
_seperationVectorX[idx] = _seperationVectorX[idx]/numberOfNeighbours[idx];
_seperationVectorZ[idx] = _seperationVectorZ[idx]/numberOfNeighbours[idx];
_seperationVectorX[idx] = ( _seperationVectorX[idx] * -1);
_seperationVectorZ[idx] = ( _seperationVectorZ[idx] * -1);
mag[idx] = norm3d(_seperationVectorX[idx], 0.0f, _seperationVectorZ[idx]);
if(mag[idx]>0)
{
_seperationVectorX[idx] = (_seperationVectorX[idx] / mag[idx]);
_seperationVectorZ[idx] = (_seperationVectorZ[idx] / mag[idx]);
}
steerBoid_kernal(_seperationVectorX, _seperationVectorZ, _velx, _velz, _seperationVectorX, _seperationVectorZ);
}
}
}
__device__ void cohesion_kernal(float * _cohesionVectorX, float * _cohesionVectorZ, float * _posx, float * _posz, float * _velx, float * _velz, int _numBoids)
{
int const noBoids = _numBoids;
__shared__ unsigned int numberOfNeighbours[NUM_BOIDS];
float mag[NUM_BOIDS];
// current boid whos neighbours were looking for
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
// neighbours of current boid
uint idy = blockIdx.y * blockDim.y + threadIdx.y;
if(idx < noBoids && idy < noBoids)
{
// reset values
numberOfNeighbours[idx] = 0;
_cohesionVectorX[idx] = 0;
_cohesionVectorZ[idx] = 0;
// wait for threads to sync
__syncthreads();
if(idx != idy)
{
if(distance_kernal(_posx[idx], _posz[idx], _posx[idy], _posz[idy]) < 0.2)
{
// add neighbours position to current boids part of the cohesion vector
atomicAdd(&(_cohesionVectorX[idx]), _posx[idy]);
atomicAdd(&(_cohesionVectorZ[idx]), _posz[idy]);
atomicAdd(&numberOfNeighbours[idx], 1);
}
}
}
// wait for threads to sync
__syncthreads();
//limit to 1D
if(idy == 0 && idx< noBoids)
{
//avoid dividing by zero
if(numberOfNeighbours[idx] > 0)
{
float tmpX = _cohesionVectorX[idx]/numberOfNeighbours[idx];
float tmpZ = _cohesionVectorZ[idx]/numberOfNeighbours[idx];
//find average position
_cohesionVectorX[idx] = tmpX;
_cohesionVectorZ[idx] = tmpZ;
_cohesionVectorX[idx] = ( _cohesionVectorX[idx] - _posx[idx]);
_cohesionVectorZ[idx] = ( _cohesionVectorZ[idx] - _posz[idx]);
mag[idx] = norm3d(_cohesionVectorX[idx], 0.0f, _cohesionVectorZ[idx]);
if(mag[idx] > 0)
{
_cohesionVectorX[idx] = (_cohesionVectorX[idx] / mag[idx]);
_cohesionVectorZ[idx] = (_cohesionVectorZ[idx] / mag[idx]);
}
steerBoid_kernal(_cohesionVectorX, _cohesionVectorZ, _velx, _velz, _cohesionVectorX, _cohesionVectorZ);
}
}
}
__global__ void flock_kernal(float * _cohesionVectorX, float * _cohesionVectorZ,float * _seperationVectorX, float * _seperationVectorZ, float * _alignmentVectorX, float * _alignmentVectorZ, float * _posx, float * _posz, float * _velx, float * _velz, int _numBoids)
{
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
uint idy = blockIdx.y * blockDim.y + threadIdx.y;
float mag[NUM_BOIDS];
if( idx <_numBoids)
{
// calculate cohesion
cohesion_kernal(_cohesionVectorX, _cohesionVectorZ, _posx, _posz, _velx, _velz, _numBoids);
seperation_kernal(_seperationVectorX, _seperationVectorZ, _posx, _posz, _velx, _velz, _numBoids);
alignment_kernal(_alignmentVectorX, _alignmentVectorZ, _posx, _posz, _velx, _velz, _numBoids);
// wait for threads to sync (dont add cohesion vector until calculated)
__syncthreads();
if(idy == 0)
{
_velx[idx]+= _cohesionVectorX[idx] + _seperationVectorX[idx] + _alignmentVectorX[idx];
_velz[idx]+= _cohesionVectorZ[idx] + _seperationVectorZ[idx] + _alignmentVectorZ[idx];
}
}
}
void dumpGeo(uint _frameNumber, thrust::device_vector <float> _posX, thrust::device_vector <float> _posZ)
{
char fname[150];
std::sprintf(fname,"geo/flock_gpu.%03d.geo",++_frameNumber);
// we will use a stringstream as it may be more efficient
std::stringstream ss;
std::ofstream file;
file.open(fname);
if (!file.is_open())
{
std::cerr << "failed to Open file "<<fname<<'\n';
exit(EXIT_FAILURE);
}
// write header see here http://www.sidefx.com/docs/houdini15.0/io/formats/geo
ss << "PGEOMETRY V5\n";
ss << "NPoints " << NUM_BOIDS << " NPrims 1\n";
ss << "NPointGroups 0 NPrimGroups 1\n";
// this is hard coded but could be flexible we have 1 attrib which is Colour
ss << "NPointAttrib 1 NVertexAttrib 0 NPrimAttrib 2 NAttrib 0\n";
// now write out our point attrib this case Cd for diffuse colour
ss <<"PointAttrib \n";
// default the colour to white
ss <<"Cd 3 float 1 1 1\n";
// now we write out the particle data in the format
// x y z 1 (attrib so in this case colour)
for(unsigned int i=0; i<NUM_BOIDS; ++i)
{
ss<<_posX[i]<<" "<<0<<" "<<_posZ[i] << " 1 ";
ss<<"("<<std::abs(1)<<" "<<std::abs(1)<<" "<<std::abs(1)<<")\n";
}
// now write out the index values
ss<<"PrimitiveAttrib\n";
ss<<"generator 1 index 1 location1\n";
ss<<"dopobject 1 index 1 /obj/AutoDopNetwork:1\n";
ss<<"Part "<<NUM_BOIDS<<" ";
for(size_t i=0; i<NUM_BOIDS; ++i)
{
ss<<i<<" ";
}
ss<<" [0 0]\n";
ss<<"box_object1 unordered\n";
ss<<"1 1\n";
ss<<"beginExtra\n";
ss<<"endExtra\n";
// dump string stream to disk;
file<<ss.rdbuf();
file.close();
}
/**
* Fill an array with random floats using the CURAND function.
* \param devData The chunk of memory you want to fill with floats within the range (0,1]
* \param n The size of the chunk of data
* \author Richard Southern
*/
int randFloats(float *&devData, const size_t n) {
// The generator, used for random numbers
hiprandGenerator_t gen;
// Create pseudo-random number generator
CURAND_CALL(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT));
// Set seed to be the current time (note that calls close together will have same seed!)
CURAND_CALL(hiprandSetPseudoRandomGeneratorSeed(gen, time(NULL)));
// Generate n floats on device
CURAND_CALL(hiprandGenerateUniform(gen, devData, n));
// Cleanup
CURAND_CALL(hiprandDestroyGenerator(gen));
return EXIT_SUCCESS;
}
int main()
{
// stores point pos
thrust::device_vector<float> m_dBoidsPosX;
thrust::device_vector<float> m_dBoidsPosZ;
// stores point pos
thrust::device_vector<float> m_dBoidsVelX;
thrust::device_vector<float> m_dBoidsVelZ;
// stores flocking vectors
thrust::device_vector<float> m_dCohesionX;
thrust::device_vector<float> m_dCohesionZ;
thrust::device_vector<float> m_dSeperationX;
thrust::device_vector<float> m_dSeperationZ;
thrust::device_vector<float> m_dAlignmentX;
thrust::device_vector<float> m_dAlignmentZ;
//thrust::device_vector<float> d_Pos(NUM_BOIDS*3);
// cant set size when constructing as member variable so resize here instead
m_dBoidsPosX.resize(NUM_BOIDS);
m_dBoidsPosZ.resize(NUM_BOIDS);
m_dBoidsVelX.resize(NUM_BOIDS);
m_dBoidsVelZ.resize(NUM_BOIDS);
m_dCohesionX.resize(NUM_BOIDS);
m_dCohesionZ.resize(NUM_BOIDS);
m_dSeperationX.resize(NUM_BOIDS);
m_dSeperationZ.resize(NUM_BOIDS);
m_dAlignmentX.resize(NUM_BOIDS);
m_dAlignmentZ.resize(NUM_BOIDS);
// fill vector with random values for pos
thrust::device_vector <float> tmp_PosPnts(NUM_BOIDS*4);
float * tmp_PosPnts_ptr = thrust::raw_pointer_cast(&tmp_PosPnts[0]);
randFloats(tmp_PosPnts_ptr, NUM_BOIDS*4);
// give random start positions
m_dBoidsPosX.assign(tmp_PosPnts.begin(), tmp_PosPnts.begin() + NUM_BOIDS);
m_dBoidsPosZ.assign(tmp_PosPnts.begin() + NUM_BOIDS, tmp_PosPnts.begin() + 2*NUM_BOIDS);
// give random start vel
m_dBoidsVelX.assign(tmp_PosPnts.begin() + 2*NUM_BOIDS, tmp_PosPnts.begin() + 3*NUM_BOIDS);
m_dBoidsVelZ.assign(tmp_PosPnts.begin() + 3*NUM_BOIDS, tmp_PosPnts.begin() + 4*NUM_BOIDS);
// create pointers pointing to the device vectors
float * m_dBoidsPosX_ptr= thrust::raw_pointer_cast(&m_dBoidsPosX[0]);
float * m_dBoidsPosZ_ptr= thrust::raw_pointer_cast(&m_dBoidsPosZ[0]);
float * m_dBoidsVelX_ptr= thrust::raw_pointer_cast(&m_dBoidsVelX[0]);
float * m_dBoidsVelZ_ptr= thrust::raw_pointer_cast(&m_dBoidsVelZ[0]);
float * m_dCohesionX_ptr= thrust::raw_pointer_cast(&m_dCohesionX[0]);
float * m_dCohesionZ_ptr= thrust::raw_pointer_cast(&m_dCohesionZ[0]);
float * m_dSeperationX_ptr= thrust::raw_pointer_cast(&m_dSeperationX[0]);
float * m_dSeperationZ_ptr= thrust::raw_pointer_cast(&m_dSeperationZ[0]);
float * m_dAlignmentX_ptr= thrust::raw_pointer_cast(&m_dAlignmentX[0]);
float * m_dAlignmentZ_ptr= thrust::raw_pointer_cast(&m_dAlignmentZ[0]);
//UPDATE-----------------------------------------------------------------------------
unsigned int nThreads = 1024;
unsigned int nBlocks = NUM_BOIDS/ nThreads + 1;
//thrust::device_vector<unsigned int> d_numNeighbourBoids(GRID_RESOLUTION*GRID_RESOLUTION, NULL_CELL);
//unsigned int * d_numNeighbourBoids_ptr = thrust::raw_pointer_cast(&d_numNeighbourBoids[0]);
// for nearest neighbour
unsigned int blockN = NUM_BOIDS / 32 + 1;
dim3 block2(32, 32); // block of (X,Y) threads
dim3 grid2(blockN, 1); // grid blockN * blockN blocks
for(int i = 0; i<NUM_FRAMES; i++)
{
// reset vectors
thrust::fill(m_dCohesionX.begin(), m_dCohesionX.begin() + NUM_BOIDS, 0);
thrust::fill(m_dCohesionZ.begin(), m_dCohesionZ.begin() + NUM_BOIDS, 0);
thrust::fill(m_dSeperationX.begin(), m_dSeperationX.begin() + NUM_BOIDS, 0);
thrust::fill(m_dSeperationZ.begin(), m_dSeperationZ.begin() + NUM_BOIDS, 0);
thrust::fill(m_dAlignmentX.begin(), m_dAlignmentX.begin() + NUM_BOIDS, 0);
thrust::fill(m_dAlignmentZ.begin(), m_dAlignmentZ.begin() + NUM_BOIDS, 0);
flock_kernal<<<grid2,block2>>>(m_dCohesionX_ptr, m_dCohesionZ_ptr, m_dSeperationX_ptr, m_dSeperationZ_ptr, m_dAlignmentX_ptr, m_dAlignmentZ_ptr, m_dBoidsPosX_ptr, m_dBoidsPosZ_ptr, m_dBoidsVelX_ptr, m_dBoidsVelZ_ptr, NUM_BOIDS);
hipDeviceSynchronize();
limitVel_kernal<<<nBlocks,nThreads>>>(0.02, m_dBoidsPosX_ptr, m_dBoidsPosZ_ptr, m_dBoidsVelX_ptr, m_dBoidsVelZ_ptr, NUM_BOIDS);
hipDeviceSynchronize();
avoidBoundaries_kernal<<<nBlocks,1024>>>(m_dBoidsPosX_ptr, m_dBoidsPosZ_ptr, m_dBoidsVelX_ptr, m_dBoidsVelZ_ptr, NUM_BOIDS);
hipDeviceSynchronize();
updatePos_kernal<<<nBlocks,1024>>>(m_dBoidsPosX_ptr, m_dBoidsPosZ_ptr, m_dBoidsVelX_ptr, m_dBoidsVelZ_ptr, NUM_BOIDS);
hipDeviceSynchronize();
dumpGeo(i, m_dBoidsPosX, m_dBoidsPosZ);
}
} |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /**
@brief Compare vector sum calculation functions in CPU vs GPU.
@file 00.cu
@author isquicha
@version 0.1.0
*/
#include <stdio.h>
#include <time.h>
// Cuda headers are on CUDA Toolkit instalation path/VERSION/include
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define N_THREADS 1024
#define LOOP_TIMES 10000000
/*
The functions run too fast to see the difference, so to compare
we run then in a loop.
In my environment
*/
/**
* Sum numbers of two vectors on GPU.
*
* The result is stored on a third vector.
*
* @param A First input vector pointer.
* @param B Second input vector pointer.
* @param C Output vector pointer.
* @return void
*/
__global__ void dMatAdd(float *A, float *B, float *C)
{
int i = threadIdx.x;
for (long j = 0; j < LOOP_TIMES; j++)
C[i] = A[i] + B[i];
}
/**
* Sum numbers of two vectors on CPU.
*
* The result is stored on a third vector.
*
* @param A First input vector pointer.
* @param B Second input vector pointer.
* @param C Output vector pointer.
* @return void
*/
void hMatAdd(float *A, float *B, float *C)
{
for (int i = 0; i < N_THREADS; i++)
{
for (long j = 0; j < LOOP_TIMES; j++)
{
C[i] = A[i] + B[i];
}
}
}
/**
* Compare two float vectors
*
* @param A First input vector pointer.
* @param B Second input vector pointer.
* @return true if vectors are equal, false otherwise
*/
bool compare(float *A, float *B)
{
for (int i = 0; i < N_THREADS; i++)
{
if (A[i] != B[i])
{
printf(
"Elements are not equal. Index %d\t\tA: %f\t\tB:%f\n",
i, A[i], B[i]);
return false;
}
}
return true;
}
int main(int argc, char const *argv[])
{
// Timing variables
clock_t h_begin, h_end, d_begin, d_end;
double h_time, d_time;
// Size auxiliar
size_t size = N_THREADS * sizeof(float);
// Host memory allocation
float *hA = (float *)malloc(size);
float *hB = (float *)malloc(size);
float *hC = (float *)malloc(size);
float *hC2 = (float *)malloc(size);
if (hA == NULL || hB == NULL || hC == NULL || hC2 == NULL)
{
printf("Malloc error!\n");
exit(1);
}
// Device memory allocation
float *dA, *dB, *dC;
cudaMalloc((void **)&dA, size);
cudaMalloc((void **)&dB, size);
cudaMalloc((void **)&dC, size);
// Vectors initialization with some values
for (int i = 0, j = N_THREADS; i < N_THREADS; i++, j--)
{
hA[i] = float(i);
//printf("hA[%d] = %f\n", i, hA[i]);
hB[i] = float(j);
}
// Host function
printf("Running Host function\n");
h_begin = clock();
hMatAdd(hA, hB, hC);
h_end = clock();
// Device function
printf("Running Device function\n");
d_begin = clock();
cudaMemcpy(dA, hA, size, cudaMemcpyHostToDevice);
cudaMemcpy(dB, hB, size, cudaMemcpyHostToDevice);
dMatAdd<<<1, N_THREADS>>>(dA, dB, dC);
cudaMemcpy(hC2, dC, size, cudaMemcpyDeviceToHost);
d_end = clock();
// Results
h_time = (double)(h_end - h_begin) / CLOCKS_PER_SEC;
d_time = (double)(d_end - d_begin) / CLOCKS_PER_SEC;
printf("Running Compare function\n");
printf("Vectors are equal?: %s\n", compare(hC, hC2) ? "true" : "false");
printf("CPU: %f seconds\n", h_time);
printf("GPU: %f seconds\n", d_time);
// Memory free
cudaFree(dA);
cudaFree(dB);
cudaFree(dC);
free(hA);
free(hB);
free(hC);
free(hC2);
return 0;
} | code for sm_80
Function : _Z7dMatAddPfS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*0040*/ UMOV UR5, 0x989680 ; /* 0x0098968000057882 */
/* 0x000fe40000000000 */
/*0050*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fca0008000000 */
/*0060*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0207 */
/*0070*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*0080*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0207 */
/*0090*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000ea8000c1e1900 */
/*00a0*/ LDG.E R9, [R2.64] ; /* 0x0000000602097981 */
/* 0x001ea4000c1e1900 */
/*00b0*/ FADD R9, R0, R9 ; /* 0x0000000900097221 */
/* 0x004fca0000000000 */
/*00c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0001e8000c101906 */
/*00d0*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000ea8000c1e1900 */
/*00e0*/ LDG.E R11, [R2.64] ; /* 0x00000006020b7981 */
/* 0x000ea4000c1e1900 */
/*00f0*/ FADD R11, R0, R11 ; /* 0x0000000b000b7221 */
/* 0x004fca0000000000 */
/*0100*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */
/* 0x0003e8000c101906 */
/*0110*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000ea8000c1e1900 */
/*0120*/ LDG.E R13, [R2.64] ; /* 0x00000006020d7981 */
/* 0x000ea4000c1e1900 */
/*0130*/ FADD R13, R0, R13 ; /* 0x0000000d000d7221 */
/* 0x004fca0000000000 */
/*0140*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */
/* 0x0005e8000c101906 */
/*0150*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000ee8000c1e1900 */
/*0160*/ LDG.E R15, [R2.64] ; /* 0x00000006020f7981 */
/* 0x000ee4000c1e1900 */
/*0170*/ FADD R15, R0, R15 ; /* 0x0000000f000f7221 */
/* 0x008fca0000000000 */
/*0180*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */
/* 0x0007e8000c101906 */
/*0190*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*01a0*/ LDG.E R9, [R2.64] ; /* 0x0000000602097981 */
/* 0x001f24000c1e1900 */
/*01b0*/ FADD R9, R0, R9 ; /* 0x0000000900097221 */
/* 0x010fca0000000000 */
/*01c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0001e8000c101906 */
/*01d0*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*01e0*/ LDG.E R11, [R2.64] ; /* 0x00000006020b7981 */
/* 0x002f24000c1e1900 */
/*01f0*/ FADD R11, R0, R11 ; /* 0x0000000b000b7221 */
/* 0x010fca0000000000 */
/*0200*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */
/* 0x0003e8000c101906 */
/*0210*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*0220*/ LDG.E R13, [R2.64] ; /* 0x00000006020d7981 */
/* 0x004f24000c1e1900 */
/*0230*/ FADD R13, R0, R13 ; /* 0x0000000d000d7221 */
/* 0x010fca0000000000 */
/*0240*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */
/* 0x0005e8000c101906 */
/*0250*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*0260*/ LDG.E R15, [R2.64] ; /* 0x00000006020f7981 */
/* 0x008f24000c1e1900 */
/*0270*/ FADD R15, R0, R15 ; /* 0x0000000f000f7221 */
/* 0x010fca0000000000 */
/*0280*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */
/* 0x0007e8000c101906 */
/*0290*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*02a0*/ LDG.E R9, [R2.64] ; /* 0x0000000602097981 */
/* 0x001f24000c1e1900 */
/*02b0*/ FADD R9, R0, R9 ; /* 0x0000000900097221 */
/* 0x010fca0000000000 */
/*02c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0001e8000c101906 */
/*02d0*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*02e0*/ LDG.E R11, [R2.64] ; /* 0x00000006020b7981 */
/* 0x002f24000c1e1900 */
/*02f0*/ FADD R11, R0, R11 ; /* 0x0000000b000b7221 */
/* 0x010fca0000000000 */
/*0300*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */
/* 0x0003e8000c101906 */
/*0310*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*0320*/ LDG.E R13, [R2.64] ; /* 0x00000006020d7981 */
/* 0x004f24000c1e1900 */
/*0330*/ FADD R13, R0, R13 ; /* 0x0000000d000d7221 */
/* 0x010fca0000000000 */
/*0340*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */
/* 0x0005e8000c101906 */
/*0350*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*0360*/ LDG.E R15, [R2.64] ; /* 0x00000006020f7981 */
/* 0x008f24000c1e1900 */
/*0370*/ FADD R15, R0, R15 ; /* 0x0000000f000f7221 */
/* 0x010fca0000000000 */
/*0380*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */
/* 0x0007e8000c101906 */
/*0390*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*03a0*/ LDG.E R9, [R2.64] ; /* 0x0000000602097981 */
/* 0x001f24000c1e1900 */
/*03b0*/ FADD R9, R0, R9 ; /* 0x0000000900097221 */
/* 0x010fca0000000000 */
/*03c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0001e8000c101906 */
/*03d0*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*03e0*/ LDG.E R11, [R2.64] ; /* 0x00000006020b7981 */
/* 0x002f24000c1e1900 */
/*03f0*/ FADD R11, R0, R11 ; /* 0x0000000b000b7221 */
/* 0x010fca0000000000 */
/*0400*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */
/* 0x0003e8000c101906 */
/*0410*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*0420*/ LDG.E R13, [R2.64] ; /* 0x00000006020d7981 */
/* 0x004f24000c1e1900 */
/*0430*/ FADD R13, R0, R13 ; /* 0x0000000d000d7221 */
/* 0x010fca0000000000 */
/*0440*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */
/* 0x0005e8000c101906 */
/*0450*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*0460*/ LDG.E R15, [R2.64] ; /* 0x00000006020f7981 */
/* 0x008f24000c1e1900 */
/*0470*/ FADD R15, R0, R15 ; /* 0x0000000f000f7221 */
/* 0x010fca0000000000 */
/*0480*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */
/* 0x0007e8000c101906 */
/*0490*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*04a0*/ LDG.E R9, [R2.64] ; /* 0x0000000602097981 */
/* 0x001f24000c1e1900 */
/*04b0*/ FADD R9, R0, R9 ; /* 0x0000000900097221 */
/* 0x010fca0000000000 */
/*04c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0001e8000c101906 */
/*04d0*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*04e0*/ LDG.E R11, [R2.64] ; /* 0x00000006020b7981 */
/* 0x002f24000c1e1900 */
/*04f0*/ FADD R11, R0, R11 ; /* 0x0000000b000b7221 */
/* 0x010fca0000000000 */
/*0500*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */
/* 0x0003e8000c101906 */
/*0510*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*0520*/ LDG.E R13, [R2.64] ; /* 0x00000006020d7981 */
/* 0x004f24000c1e1900 */
/*0530*/ FADD R13, R0, R13 ; /* 0x0000000d000d7221 */
/* 0x010fca0000000000 */
/*0540*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */
/* 0x0005e8000c101906 */
/*0550*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*0560*/ LDG.E R15, [R2.64] ; /* 0x00000006020f7981 */
/* 0x008f24000c1e1900 */
/*0570*/ FADD R15, R0, R15 ; /* 0x0000000f000f7221 */
/* 0x010fca0000000000 */
/*0580*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */
/* 0x0007e8000c101906 */
/*0590*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*05a0*/ LDG.E R9, [R2.64] ; /* 0x0000000602097981 */
/* 0x001f24000c1e1900 */
/*05b0*/ FADD R9, R0, R9 ; /* 0x0000000900097221 */
/* 0x010fca0000000000 */
/*05c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0001e8000c101906 */
/*05d0*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*05e0*/ LDG.E R11, [R2.64] ; /* 0x00000006020b7981 */
/* 0x002f24000c1e1900 */
/*05f0*/ FADD R11, R0, R11 ; /* 0x0000000b000b7221 */
/* 0x010fca0000000000 */
/*0600*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */
/* 0x0003e8000c101906 */
/*0610*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*0620*/ LDG.E R13, [R2.64] ; /* 0x00000006020d7981 */
/* 0x004f24000c1e1900 */
/*0630*/ FADD R13, R0, R13 ; /* 0x0000000d000d7221 */
/* 0x010fca0000000000 */
/*0640*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */
/* 0x0005e8000c101906 */
/*0650*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*0660*/ LDG.E R15, [R2.64] ; /* 0x00000006020f7981 */
/* 0x008f24000c1e1900 */
/*0670*/ FADD R15, R0, R15 ; /* 0x0000000f000f7221 */
/* 0x010fca0000000000 */
/*0680*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */
/* 0x0007e8000c101906 */
/*0690*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*06a0*/ LDG.E R9, [R2.64] ; /* 0x0000000602097981 */
/* 0x001f24000c1e1900 */
/*06b0*/ FADD R9, R0, R9 ; /* 0x0000000900097221 */
/* 0x010fca0000000000 */
/*06c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0001e8000c101906 */
/*06d0*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*06e0*/ LDG.E R11, [R2.64] ; /* 0x00000006020b7981 */
/* 0x002f24000c1e1900 */
/*06f0*/ FADD R11, R0, R11 ; /* 0x0000000b000b7221 */
/* 0x010fca0000000000 */
/*0700*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */
/* 0x0003e8000c101906 */
/*0710*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*0720*/ LDG.E R13, [R2.64] ; /* 0x00000006020d7981 */
/* 0x004f24000c1e1900 */
/*0730*/ FADD R13, R0, R13 ; /* 0x0000000d000d7221 */
/* 0x010fca0000000000 */
/*0740*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */
/* 0x0005e8000c101906 */
/*0750*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*0760*/ LDG.E R15, [R2.64] ; /* 0x00000006020f7981 */
/* 0x008f24000c1e1900 */
/*0770*/ FADD R15, R0, R15 ; /* 0x0000000f000f7221 */
/* 0x010fca0000000000 */
/*0780*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */
/* 0x0007e8000c101906 */
/*0790*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*07a0*/ LDG.E R9, [R2.64] ; /* 0x0000000602097981 */
/* 0x001f24000c1e1900 */
/*07b0*/ FADD R9, R0, R9 ; /* 0x0000000900097221 */
/* 0x010fca0000000000 */
/*07c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0001e8000c101906 */
/*07d0*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*07e0*/ LDG.E R11, [R2.64] ; /* 0x00000006020b7981 */
/* 0x002f24000c1e1900 */
/*07f0*/ FADD R11, R0, R11 ; /* 0x0000000b000b7221 */
/* 0x010fca0000000000 */
/*0800*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */
/* 0x0001e8000c101906 */
/*0810*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*0820*/ LDG.E R13, [R2.64] ; /* 0x00000006020d7981 */
/* 0x004f22000c1e1900 */
/*0830*/ UIADD3 UR5, UP0, UR5, -0x20, URZ ; /* 0xffffffe005057890 */
/* 0x000fc8000ff1e03f */
/*0840*/ UIADD3.X UR4, UR4, -0x1, URZ, UP0, !UPT ; /* 0xffffffff04047890 */
/* 0x000fe200087fe43f */
/*0850*/ FADD R13, R0, R13 ; /* 0x0000000d000d7221 */
/* 0x010fca0000000000 */
/*0860*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */
/* 0x0001e8000c101906 */
/*0870*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000ea8000c1e1900 */
/*0880*/ LDG.E R15, [R2.64] ; /* 0x00000006020f7981 */
/* 0x008ea2000c1e1900 */
/*0890*/ ISETP.NE.U32.AND P0, PT, RZ, UR5, PT ; /* 0x00000005ff007c0c */
/* 0x000fc8000bf05070 */
/*08a0*/ ISETP.NE.AND.EX P0, PT, RZ, UR4, PT, P0 ; /* 0x00000004ff007c0c */
/* 0x000fe2000bf05300 */
/*08b0*/ FADD R15, R0, R15 ; /* 0x0000000f000f7221 */
/* 0x004fca0000000000 */
/*08c0*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */
/* 0x0001ee000c101906 */
/*08d0*/ @P0 BRA 0x90 ; /* 0xfffff7b000000947 */
/* 0x000fea000383ffff */
/*08e0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*08f0*/ BRA 0x8f0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0900*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0910*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0920*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0930*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0940*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0950*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0960*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0970*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /**
@brief Compare vector sum calculation functions in CPU vs GPU.
@file 00.cu
@author isquicha
@version 0.1.0
*/
#include <stdio.h>
#include <time.h>
// Cuda headers are on CUDA Toolkit instalation path/VERSION/include
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define N_THREADS 1024
#define LOOP_TIMES 10000000
/*
The functions run too fast to see the difference, so to compare
we run then in a loop.
In my environment
*/
/**
* Sum numbers of two vectors on GPU.
*
* The result is stored on a third vector.
*
* @param A First input vector pointer.
* @param B Second input vector pointer.
* @param C Output vector pointer.
* @return void
*/
__global__ void dMatAdd(float *A, float *B, float *C)
{
int i = threadIdx.x;
for (long j = 0; j < LOOP_TIMES; j++)
C[i] = A[i] + B[i];
}
/**
* Sum numbers of two vectors on CPU.
*
* The result is stored on a third vector.
*
* @param A First input vector pointer.
* @param B Second input vector pointer.
* @param C Output vector pointer.
* @return void
*/
void hMatAdd(float *A, float *B, float *C)
{
for (int i = 0; i < N_THREADS; i++)
{
for (long j = 0; j < LOOP_TIMES; j++)
{
C[i] = A[i] + B[i];
}
}
}
/**
* Compare two float vectors
*
* @param A First input vector pointer.
* @param B Second input vector pointer.
* @return true if vectors are equal, false otherwise
*/
bool compare(float *A, float *B)
{
for (int i = 0; i < N_THREADS; i++)
{
if (A[i] != B[i])
{
printf(
"Elements are not equal. Index %d\t\tA: %f\t\tB:%f\n",
i, A[i], B[i]);
return false;
}
}
return true;
}
int main(int argc, char const *argv[])
{
// Timing variables
clock_t h_begin, h_end, d_begin, d_end;
double h_time, d_time;
// Size auxiliar
size_t size = N_THREADS * sizeof(float);
// Host memory allocation
float *hA = (float *)malloc(size);
float *hB = (float *)malloc(size);
float *hC = (float *)malloc(size);
float *hC2 = (float *)malloc(size);
if (hA == NULL || hB == NULL || hC == NULL || hC2 == NULL)
{
printf("Malloc error!\n");
exit(1);
}
// Device memory allocation
float *dA, *dB, *dC;
cudaMalloc((void **)&dA, size);
cudaMalloc((void **)&dB, size);
cudaMalloc((void **)&dC, size);
// Vectors initialization with some values
for (int i = 0, j = N_THREADS; i < N_THREADS; i++, j--)
{
hA[i] = float(i);
//printf("hA[%d] = %f\n", i, hA[i]);
hB[i] = float(j);
}
// Host function
printf("Running Host function\n");
h_begin = clock();
hMatAdd(hA, hB, hC);
h_end = clock();
// Device function
printf("Running Device function\n");
d_begin = clock();
cudaMemcpy(dA, hA, size, cudaMemcpyHostToDevice);
cudaMemcpy(dB, hB, size, cudaMemcpyHostToDevice);
dMatAdd<<<1, N_THREADS>>>(dA, dB, dC);
cudaMemcpy(hC2, dC, size, cudaMemcpyDeviceToHost);
d_end = clock();
// Results
h_time = (double)(h_end - h_begin) / CLOCKS_PER_SEC;
d_time = (double)(d_end - d_begin) / CLOCKS_PER_SEC;
printf("Running Compare function\n");
printf("Vectors are equal?: %s\n", compare(hC, hC2) ? "true" : "false");
printf("CPU: %f seconds\n", h_time);
printf("GPU: %f seconds\n", d_time);
// Memory free
cudaFree(dA);
cudaFree(dB);
cudaFree(dC);
free(hA);
free(hB);
free(hC);
free(hC2);
return 0;
} | .file "tmpxft_00065296_00000000-6_00.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2062:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2062:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z7hMatAddPfS_S_
.type _Z7hMatAddPfS_S_, @function
_Z7hMatAddPfS_S_:
.LFB2057:
.cfi_startproc
endbr64
movq %rdx, %rcx
movq %rdi, %rdx
addq $4096, %rdi
.L4:
movl $10000000, %eax
.L5:
movss (%rdx), %xmm0
addss (%rsi), %xmm0
movss %xmm0, (%rcx)
subq $1, %rax
jne .L5
addq $4, %rdx
addq $4, %rsi
addq $4, %rcx
cmpq %rdi, %rdx
jne .L4
ret
.cfi_endproc
.LFE2057:
.size _Z7hMatAddPfS_S_, .-_Z7hMatAddPfS_S_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "Elements are not equal. Index %d\t\tA: %f\t\tB:%f\n"
.text
.globl _Z7comparePfS_
.type _Z7comparePfS_, @function
_Z7comparePfS_:
.LFB2058:
.cfi_startproc
endbr64
movl $0, %edx
.L12:
movss (%rdi,%rdx,4), %xmm0
movss (%rsi,%rdx,4), %xmm1
ucomiss %xmm1, %xmm0
jp .L13
jne .L13
addq $1, %rdx
cmpq $1024, %rdx
jne .L12
movl $1, %eax
ret
.L13:
subq $8, %rsp
.cfi_def_cfa_offset 16
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
movl $0, %eax
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size _Z7comparePfS_, .-_Z7comparePfS_
.globl _Z30__device_stub__Z7dMatAddPfS_S_PfS_S_
.type _Z30__device_stub__Z7dMatAddPfS_S_PfS_S_, @function
_Z30__device_stub__Z7dMatAddPfS_S_PfS_S_:
.LFB2084:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L22
.L18:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L23
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L22:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z7dMatAddPfS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L18
.L23:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2084:
.size _Z30__device_stub__Z7dMatAddPfS_S_PfS_S_, .-_Z30__device_stub__Z7dMatAddPfS_S_PfS_S_
.globl _Z7dMatAddPfS_S_
.type _Z7dMatAddPfS_S_, @function
_Z7dMatAddPfS_S_:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z7dMatAddPfS_S_PfS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _Z7dMatAddPfS_S_, .-_Z7dMatAddPfS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "true"
.LC2:
.string "false"
.LC3:
.string "Malloc error!\n"
.LC4:
.string "Running Host function\n"
.LC5:
.string "Running Device function\n"
.LC7:
.string "Running Compare function\n"
.LC8:
.string "Vectors are equal?: %s\n"
.LC9:
.string "CPU: %f seconds\n"
.LC10:
.string "GPU: %f seconds\n"
.text
.globl main
.type main, @function
main:
.LFB2059:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $88, %rsp
.cfi_def_cfa_offset 144
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $4096, %edi
call malloc@PLT
movq %rax, %rbp
movl $4096, %edi
call malloc@PLT
movq %rax, %rbx
movl $4096, %edi
call malloc@PLT
movq %rax, %r13
movl $4096, %edi
call malloc@PLT
testq %rbp, %rbp
je .L27
movq %rax, %r12
testq %rbx, %rbx
je .L27
testq %r13, %r13
je .L27
testq %rax, %rax
je .L27
leaq 24(%rsp), %rdi
movl $4096, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $4096, %esi
call cudaMalloc@PLT
leaq 40(%rsp), %rdi
movl $4096, %esi
call cudaMalloc@PLT
movl $0, %eax
movl $1024, %ecx
.L30:
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, 0(%rbp,%rax,4)
movl %ecx, %edx
subl %eax, %edx
pxor %xmm0, %xmm0
cvtsi2ssl %edx, %xmm0
movss %xmm0, (%rbx,%rax,4)
addq $1, %rax
cmpq $1024, %rax
jne .L30
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call clock@PLT
movq %rax, %r15
movq %r13, %rdx
movq %rbx, %rsi
movq %rbp, %rdi
call _Z7hMatAddPfS_S_
call clock@PLT
movq %rax, 8(%rsp)
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call clock@PLT
movq %rax, %r14
movl $1, %ecx
movl $4096, %edx
movq %rbp, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $4096, %edx
movq %rbx, %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
movl $1024, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L37
.L31:
movl $2, %ecx
movl $4096, %edx
movq 40(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
call clock@PLT
movq 8(%rsp), %rdx
subq %r15, %rdx
pxor %xmm1, %xmm1
cvtsi2sdq %rdx, %xmm1
movsd .LC6(%rip), %xmm0
divsd %xmm0, %xmm1
movq %xmm1, %r15
subq %r14, %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
divsd %xmm0, %xmm1
movq %xmm1, %r14
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %r12, %rsi
movq %r13, %rdi
call _Z7comparePfS_
testb %al, %al
leaq .LC2(%rip), %rdx
leaq .LC1(%rip), %rax
cmovne %rax, %rdx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %r15, %xmm0
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq %r14, %xmm0
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L38
movl $0, %eax
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L27:
.cfi_restore_state
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L37:
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z30__device_stub__Z7dMatAddPfS_S_PfS_S_
jmp .L31
.L38:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2059:
.size main, .-main
.section .rodata.str1.1
.LC11:
.string "_Z7dMatAddPfS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2087:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _Z7dMatAddPfS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC6:
.long 0
.long 1093567616
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /**
@brief Compare vector sum calculation functions in CPU vs GPU.
@file 00.cu
@author isquicha
@version 0.1.0
*/
#include <stdio.h>
#include <time.h>
// Cuda headers are on CUDA Toolkit instalation path/VERSION/include
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define N_THREADS 1024
#define LOOP_TIMES 10000000
/*
The functions run too fast to see the difference, so to compare
we run then in a loop.
In my environment
*/
/**
* Sum numbers of two vectors on GPU.
*
* The result is stored on a third vector.
*
* @param A First input vector pointer.
* @param B Second input vector pointer.
* @param C Output vector pointer.
* @return void
*/
__global__ void dMatAdd(float *A, float *B, float *C)
{
int i = threadIdx.x;
for (long j = 0; j < LOOP_TIMES; j++)
C[i] = A[i] + B[i];
}
/**
* Sum numbers of two vectors on CPU.
*
* The result is stored on a third vector.
*
* @param A First input vector pointer.
* @param B Second input vector pointer.
* @param C Output vector pointer.
* @return void
*/
void hMatAdd(float *A, float *B, float *C)
{
for (int i = 0; i < N_THREADS; i++)
{
for (long j = 0; j < LOOP_TIMES; j++)
{
C[i] = A[i] + B[i];
}
}
}
/**
* Compare two float vectors
*
* @param A First input vector pointer.
* @param B Second input vector pointer.
* @return true if vectors are equal, false otherwise
*/
bool compare(float *A, float *B)
{
for (int i = 0; i < N_THREADS; i++)
{
if (A[i] != B[i])
{
printf(
"Elements are not equal. Index %d\t\tA: %f\t\tB:%f\n",
i, A[i], B[i]);
return false;
}
}
return true;
}
int main(int argc, char const *argv[])
{
// Timing variables
clock_t h_begin, h_end, d_begin, d_end;
double h_time, d_time;
// Size auxiliar
size_t size = N_THREADS * sizeof(float);
// Host memory allocation
float *hA = (float *)malloc(size);
float *hB = (float *)malloc(size);
float *hC = (float *)malloc(size);
float *hC2 = (float *)malloc(size);
if (hA == NULL || hB == NULL || hC == NULL || hC2 == NULL)
{
printf("Malloc error!\n");
exit(1);
}
// Device memory allocation
float *dA, *dB, *dC;
cudaMalloc((void **)&dA, size);
cudaMalloc((void **)&dB, size);
cudaMalloc((void **)&dC, size);
// Vectors initialization with some values
for (int i = 0, j = N_THREADS; i < N_THREADS; i++, j--)
{
hA[i] = float(i);
//printf("hA[%d] = %f\n", i, hA[i]);
hB[i] = float(j);
}
// Host function
printf("Running Host function\n");
h_begin = clock();
hMatAdd(hA, hB, hC);
h_end = clock();
// Device function
printf("Running Device function\n");
d_begin = clock();
cudaMemcpy(dA, hA, size, cudaMemcpyHostToDevice);
cudaMemcpy(dB, hB, size, cudaMemcpyHostToDevice);
dMatAdd<<<1, N_THREADS>>>(dA, dB, dC);
cudaMemcpy(hC2, dC, size, cudaMemcpyDeviceToHost);
d_end = clock();
// Results
h_time = (double)(h_end - h_begin) / CLOCKS_PER_SEC;
d_time = (double)(d_end - d_begin) / CLOCKS_PER_SEC;
printf("Running Compare function\n");
printf("Vectors are equal?: %s\n", compare(hC, hC2) ? "true" : "false");
printf("CPU: %f seconds\n", h_time);
printf("GPU: %f seconds\n", d_time);
// Memory free
cudaFree(dA);
cudaFree(dB);
cudaFree(dC);
free(hA);
free(hB);
free(hC);
free(hC2);
return 0;
} | /**
@brief Compare vector sum calculation functions in CPU vs GPU.
@file 00.cu
@author isquicha
@version 0.1.0
*/
#include <stdio.h>
#include <time.h>
// Cuda headers are on CUDA Toolkit instalation path/VERSION/include
#include "hip/hip_runtime.h"
#define N_THREADS 1024
#define LOOP_TIMES 10000000
/*
The functions run too fast to see the difference, so to compare
we run then in a loop.
In my environment
*/
/**
* Sum numbers of two vectors on GPU.
*
* The result is stored on a third vector.
*
* @param A First input vector pointer.
* @param B Second input vector pointer.
* @param C Output vector pointer.
* @return void
*/
__global__ void dMatAdd(float *A, float *B, float *C)
{
int i = threadIdx.x;
for (long j = 0; j < LOOP_TIMES; j++)
C[i] = A[i] + B[i];
}
/**
* Sum numbers of two vectors on CPU.
*
* The result is stored on a third vector.
*
* @param A First input vector pointer.
* @param B Second input vector pointer.
* @param C Output vector pointer.
* @return void
*/
void hMatAdd(float *A, float *B, float *C)
{
for (int i = 0; i < N_THREADS; i++)
{
for (long j = 0; j < LOOP_TIMES; j++)
{
C[i] = A[i] + B[i];
}
}
}
/**
* Compare two float vectors
*
* @param A First input vector pointer.
* @param B Second input vector pointer.
* @return true if vectors are equal, false otherwise
*/
bool compare(float *A, float *B)
{
for (int i = 0; i < N_THREADS; i++)
{
if (A[i] != B[i])
{
printf(
"Elements are not equal. Index %d\t\tA: %f\t\tB:%f\n",
i, A[i], B[i]);
return false;
}
}
return true;
}
int main(int argc, char const *argv[])
{
// Timing variables
clock_t h_begin, h_end, d_begin, d_end;
double h_time, d_time;
// Size auxiliar
size_t size = N_THREADS * sizeof(float);
// Host memory allocation
float *hA = (float *)malloc(size);
float *hB = (float *)malloc(size);
float *hC = (float *)malloc(size);
float *hC2 = (float *)malloc(size);
if (hA == NULL || hB == NULL || hC == NULL || hC2 == NULL)
{
printf("Malloc error!\n");
exit(1);
}
// Device memory allocation
float *dA, *dB, *dC;
hipMalloc((void **)&dA, size);
hipMalloc((void **)&dB, size);
hipMalloc((void **)&dC, size);
// Vectors initialization with some values
for (int i = 0, j = N_THREADS; i < N_THREADS; i++, j--)
{
hA[i] = float(i);
//printf("hA[%d] = %f\n", i, hA[i]);
hB[i] = float(j);
}
// Host function
printf("Running Host function\n");
h_begin = clock();
hMatAdd(hA, hB, hC);
h_end = clock();
// Device function
printf("Running Device function\n");
d_begin = clock();
hipMemcpy(dA, hA, size, hipMemcpyHostToDevice);
hipMemcpy(dB, hB, size, hipMemcpyHostToDevice);
dMatAdd<<<1, N_THREADS>>>(dA, dB, dC);
hipMemcpy(hC2, dC, size, hipMemcpyDeviceToHost);
d_end = clock();
// Results
h_time = (double)(h_end - h_begin) / CLOCKS_PER_SEC;
d_time = (double)(d_end - d_begin) / CLOCKS_PER_SEC;
printf("Running Compare function\n");
printf("Vectors are equal?: %s\n", compare(hC, hC2) ? "true" : "false");
printf("CPU: %f seconds\n", h_time);
printf("GPU: %f seconds\n", d_time);
// Memory free
hipFree(dA);
hipFree(dB);
hipFree(dC);
free(hA);
free(hB);
free(hC);
free(hC2);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /**
@brief Compare vector sum calculation functions in CPU vs GPU.
@file 00.cu
@author isquicha
@version 0.1.0
*/
#include <stdio.h>
#include <time.h>
// Cuda headers are on CUDA Toolkit instalation path/VERSION/include
#include "hip/hip_runtime.h"
#define N_THREADS 1024
#define LOOP_TIMES 10000000
/*
The functions run too fast to see the difference, so to compare
we run then in a loop.
In my environment
*/
/**
* Sum numbers of two vectors on GPU.
*
* The result is stored on a third vector.
*
* @param A First input vector pointer.
* @param B Second input vector pointer.
* @param C Output vector pointer.
* @return void
*/
__global__ void dMatAdd(float *A, float *B, float *C)
{
int i = threadIdx.x;
for (long j = 0; j < LOOP_TIMES; j++)
C[i] = A[i] + B[i];
}
/**
* Sum numbers of two vectors on CPU.
*
* The result is stored on a third vector.
*
* @param A First input vector pointer.
* @param B Second input vector pointer.
* @param C Output vector pointer.
* @return void
*/
void hMatAdd(float *A, float *B, float *C)
{
for (int i = 0; i < N_THREADS; i++)
{
for (long j = 0; j < LOOP_TIMES; j++)
{
C[i] = A[i] + B[i];
}
}
}
/**
* Compare two float vectors
*
* @param A First input vector pointer.
* @param B Second input vector pointer.
* @return true if vectors are equal, false otherwise
*/
bool compare(float *A, float *B)
{
for (int i = 0; i < N_THREADS; i++)
{
if (A[i] != B[i])
{
printf(
"Elements are not equal. Index %d\t\tA: %f\t\tB:%f\n",
i, A[i], B[i]);
return false;
}
}
return true;
}
int main(int argc, char const *argv[])
{
// Timing variables
clock_t h_begin, h_end, d_begin, d_end;
double h_time, d_time;
// Size auxiliar
size_t size = N_THREADS * sizeof(float);
// Host memory allocation
float *hA = (float *)malloc(size);
float *hB = (float *)malloc(size);
float *hC = (float *)malloc(size);
float *hC2 = (float *)malloc(size);
if (hA == NULL || hB == NULL || hC == NULL || hC2 == NULL)
{
printf("Malloc error!\n");
exit(1);
}
// Device memory allocation
float *dA, *dB, *dC;
hipMalloc((void **)&dA, size);
hipMalloc((void **)&dB, size);
hipMalloc((void **)&dC, size);
// Vectors initialization with some values
for (int i = 0, j = N_THREADS; i < N_THREADS; i++, j--)
{
hA[i] = float(i);
//printf("hA[%d] = %f\n", i, hA[i]);
hB[i] = float(j);
}
// Host function
printf("Running Host function\n");
h_begin = clock();
hMatAdd(hA, hB, hC);
h_end = clock();
// Device function
printf("Running Device function\n");
d_begin = clock();
hipMemcpy(dA, hA, size, hipMemcpyHostToDevice);
hipMemcpy(dB, hB, size, hipMemcpyHostToDevice);
dMatAdd<<<1, N_THREADS>>>(dA, dB, dC);
hipMemcpy(hC2, dC, size, hipMemcpyDeviceToHost);
d_end = clock();
// Results
h_time = (double)(h_end - h_begin) / CLOCKS_PER_SEC;
d_time = (double)(d_end - d_begin) / CLOCKS_PER_SEC;
printf("Running Compare function\n");
printf("Vectors are equal?: %s\n", compare(hC, hC2) ? "true" : "false");
printf("CPU: %f seconds\n", h_time);
printf("GPU: %f seconds\n", d_time);
// Memory free
hipFree(dA);
hipFree(dB);
hipFree(dC);
free(hA);
free(hB);
free(hC);
free(hC2);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z7dMatAddPfS_S_
.globl _Z7dMatAddPfS_S_
.p2align 8
.type _Z7dMatAddPfS_S_,@function
_Z7dMatAddPfS_S_:
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x10
v_lshlrev_b32_e32 v4, 2, v0
s_mov_b64 s[0:1], 0x989680
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v0, s4, s4, v4
v_add_co_ci_u32_e64 v1, null, s5, 0, s4
v_add_co_u32 v2, s4, s6, v4
v_add_co_u32 v4, s2, s2, v4
v_add_co_ci_u32_e64 v3, null, s7, 0, s4
v_add_co_ci_u32_e64 v5, null, s3, 0, s2
.LBB0_1:
global_load_b32 v6, v[0:1], off
global_load_b32 v7, v[2:3], off
s_add_u32 s0, s0, -1
s_addc_u32 s1, s1, -1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_eq_u64 s[0:1], 0
s_waitcnt vmcnt(0)
v_add_f32_e32 v6, v6, v7
global_store_b32 v[4:5], v6, off
s_cbranch_scc0 .LBB0_1
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z7dMatAddPfS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z7dMatAddPfS_S_, .Lfunc_end0-_Z7dMatAddPfS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z7dMatAddPfS_S_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z7dMatAddPfS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /**
@brief Compare vector sum calculation functions in CPU vs GPU.
@file 00.cu
@author isquicha
@version 0.1.0
*/
#include <stdio.h>
#include <time.h>
// Cuda headers are on CUDA Toolkit instalation path/VERSION/include
#include "hip/hip_runtime.h"
#define N_THREADS 1024
#define LOOP_TIMES 10000000
/*
The functions run too fast to see the difference, so to compare
we run then in a loop.
In my environment
*/
/**
* Sum numbers of two vectors on GPU.
*
* The result is stored on a third vector.
*
* @param A First input vector pointer.
* @param B Second input vector pointer.
* @param C Output vector pointer.
* @return void
*/
__global__ void dMatAdd(float *A, float *B, float *C)
{
int i = threadIdx.x;
for (long j = 0; j < LOOP_TIMES; j++)
C[i] = A[i] + B[i];
}
/**
* Sum numbers of two vectors on CPU.
*
* The result is stored on a third vector.
*
* @param A First input vector pointer.
* @param B Second input vector pointer.
* @param C Output vector pointer.
* @return void
*/
void hMatAdd(float *A, float *B, float *C)
{
for (int i = 0; i < N_THREADS; i++)
{
for (long j = 0; j < LOOP_TIMES; j++)
{
C[i] = A[i] + B[i];
}
}
}
/**
* Compare two float vectors
*
* @param A First input vector pointer.
* @param B Second input vector pointer.
* @return true if vectors are equal, false otherwise
*/
bool compare(float *A, float *B)
{
for (int i = 0; i < N_THREADS; i++)
{
if (A[i] != B[i])
{
printf(
"Elements are not equal. Index %d\t\tA: %f\t\tB:%f\n",
i, A[i], B[i]);
return false;
}
}
return true;
}
int main(int argc, char const *argv[])
{
// Timing variables
clock_t h_begin, h_end, d_begin, d_end;
double h_time, d_time;
// Size auxiliar
size_t size = N_THREADS * sizeof(float);
// Host memory allocation
float *hA = (float *)malloc(size);
float *hB = (float *)malloc(size);
float *hC = (float *)malloc(size);
float *hC2 = (float *)malloc(size);
if (hA == NULL || hB == NULL || hC == NULL || hC2 == NULL)
{
printf("Malloc error!\n");
exit(1);
}
// Device memory allocation
float *dA, *dB, *dC;
hipMalloc((void **)&dA, size);
hipMalloc((void **)&dB, size);
hipMalloc((void **)&dC, size);
// Vectors initialization with some values
for (int i = 0, j = N_THREADS; i < N_THREADS; i++, j--)
{
hA[i] = float(i);
//printf("hA[%d] = %f\n", i, hA[i]);
hB[i] = float(j);
}
// Host function
printf("Running Host function\n");
h_begin = clock();
hMatAdd(hA, hB, hC);
h_end = clock();
// Device function
printf("Running Device function\n");
d_begin = clock();
hipMemcpy(dA, hA, size, hipMemcpyHostToDevice);
hipMemcpy(dB, hB, size, hipMemcpyHostToDevice);
dMatAdd<<<1, N_THREADS>>>(dA, dB, dC);
hipMemcpy(hC2, dC, size, hipMemcpyDeviceToHost);
d_end = clock();
// Results
h_time = (double)(h_end - h_begin) / CLOCKS_PER_SEC;
d_time = (double)(d_end - d_begin) / CLOCKS_PER_SEC;
printf("Running Compare function\n");
printf("Vectors are equal?: %s\n", compare(hC, hC2) ? "true" : "false");
printf("CPU: %f seconds\n", h_time);
printf("GPU: %f seconds\n", d_time);
// Memory free
hipFree(dA);
hipFree(dB);
hipFree(dC);
free(hA);
free(hB);
free(hC);
free(hC2);
return 0;
} | .text
.file "00.hip"
.globl _Z22__device_stub__dMatAddPfS_S_ # -- Begin function _Z22__device_stub__dMatAddPfS_S_
.p2align 4, 0x90
.type _Z22__device_stub__dMatAddPfS_S_,@function
_Z22__device_stub__dMatAddPfS_S_: # @_Z22__device_stub__dMatAddPfS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z7dMatAddPfS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z22__device_stub__dMatAddPfS_S_, .Lfunc_end0-_Z22__device_stub__dMatAddPfS_S_
.cfi_endproc
# -- End function
.globl _Z7hMatAddPfS_S_ # -- Begin function _Z7hMatAddPfS_S_
.p2align 4, 0x90
.type _Z7hMatAddPfS_S_,@function
_Z7hMatAddPfS_S_: # @_Z7hMatAddPfS_S_
.cfi_startproc
# %bb.0:
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_2 Depth 2
movl $10000000, %ecx # imm = 0x989680
.p2align 4, 0x90
.LBB1_2: # Parent Loop BB1_1 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rdi,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
addss (%rsi,%rax,4), %xmm0
movss %xmm0, (%rdx,%rax,4)
decq %rcx
jne .LBB1_2
# %bb.3: # in Loop: Header=BB1_1 Depth=1
incq %rax
cmpq $1024, %rax # imm = 0x400
jne .LBB1_1
# %bb.4:
retq
.Lfunc_end1:
.size _Z7hMatAddPfS_S_, .Lfunc_end1-_Z7hMatAddPfS_S_
.cfi_endproc
# -- End function
.globl _Z7comparePfS_ # -- Begin function _Z7comparePfS_
.p2align 4, 0x90
.type _Z7comparePfS_,@function
_Z7comparePfS_: # @_Z7comparePfS_
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
movq %rsi, %rax
movss (%rdi), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss (%rsi), %xmm1 # xmm1 = mem[0],zero,zero,zero
xorl %esi, %esi
ucomiss %xmm1, %xmm0
movl $0, %ebx
jne .LBB2_5
jp .LBB2_5
# %bb.1: # %.lr.ph.preheader
xorl %esi, %esi
.p2align 4, 0x90
.LBB2_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
cmpq $1023, %rsi # imm = 0x3FF
je .LBB2_6
# %bb.3: # in Loop: Header=BB2_2 Depth=1
movss 4(%rdi,%rsi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss 4(%rax,%rsi,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
incq %rsi
ucomiss %xmm1, %xmm0
jne .LBB2_4
jnp .LBB2_2
.LBB2_4: # %._crit_edge
leaq -1(%rsi), %rax
cmpq $1023, %rax # imm = 0x3FF
setae %bl
.LBB2_5:
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
movl $.L.str, %edi
# kill: def $esi killed $esi killed $rsi
movb $2, %al
callq printf
.LBB2_7: # %.loopexit
movl %ebx, %eax
popq %rbx
.cfi_def_cfa_offset 8
retq
.LBB2_6: # %.loopexit.loopexit
.cfi_def_cfa_offset 16
setae %bl
jmp .LBB2_7
.Lfunc_end2:
.size _Z7comparePfS_, .Lfunc_end2-_Z7comparePfS_
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI3_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $152, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $4096, %edi # imm = 0x1000
callq malloc
movq %rax, %rbx
movl $4096, %edi # imm = 0x1000
callq malloc
movq %rax, %r14
movl $4096, %edi # imm = 0x1000
callq malloc
movq %rax, %r15
movl $4096, %edi # imm = 0x1000
callq malloc
testq %rbx, %rbx
je .LBB3_18
# %bb.1:
testq %r14, %r14
je .LBB3_18
# %bb.2:
testq %r15, %r15
je .LBB3_18
# %bb.3:
testq %rax, %rax
je .LBB3_18
# %bb.4:
movq %rax, 16(%rsp) # 8-byte Spill
leaq 40(%rsp), %rdi
movl $4096, %esi # imm = 0x1000
callq hipMalloc
leaq 32(%rsp), %rdi
movl $4096, %esi # imm = 0x1000
callq hipMalloc
leaq 24(%rsp), %rdi
movl $4096, %esi # imm = 0x1000
callq hipMalloc
movl $1024, %eax # imm = 0x400
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB3_5: # =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2ss %ecx, %xmm0
movss %xmm0, (%rbx,%rcx,4)
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%r14,%rcx,4)
incq %rcx
decl %eax
cmpq $1024, %rcx # imm = 0x400
jne .LBB3_5
# %bb.6:
movl $.Lstr, %edi
callq puts@PLT
xorl %r12d, %r12d
callq clock
movq %rax, 8(%rsp) # 8-byte Spill
.p2align 4, 0x90
.LBB3_7: # %.preheader.i
# =>This Inner Loop Header: Depth=1
movss (%rbx,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
addss (%r14,%r12,4), %xmm0
movss %xmm0, (%r15,%r12,4)
incq %r12
cmpq $1024, %r12 # imm = 0x400
jne .LBB3_7
# %bb.8: # %_Z7hMatAddPfS_S_.exit
callq clock
movq %rax, 48(%rsp) # 8-byte Spill
movl $.Lstr.1, %edi
callq puts@PLT
callq clock
movq %rax, (%rsp) # 8-byte Spill
movq 40(%rsp), %rdi
movl $4096, %edx # imm = 0x1000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 32(%rsp), %rdi
movl $4096, %edx # imm = 0x1000
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 1023(%rdi), %rdx
xorl %r13d, %r13d
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_10
# %bb.9:
movq 40(%rsp), %rax
movq 32(%rsp), %rcx
movq 24(%rsp), %rdx
movq %rax, 120(%rsp)
movq %rcx, 112(%rsp)
movq %rdx, 104(%rsp)
leaq 120(%rsp), %rax
movq %rax, 128(%rsp)
leaq 112(%rsp), %rax
movq %rax, 136(%rsp)
leaq 104(%rsp), %rax
movq %rax, 144(%rsp)
leaq 88(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 64(%rsp), %rdx
leaq 56(%rsp), %rcx
callq __hipPopCallConfiguration
movq 88(%rsp), %rsi
movl 96(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z7dMatAddPfS_S_, %edi
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_10:
movq 24(%rsp), %rsi
movl $4096, %edx # imm = 0x1000
movq 16(%rsp), %r12 # 8-byte Reload
movq %r12, %rdi
movl $2, %ecx
callq hipMemcpy
callq clock
movq %rax, %rbp
movl $.Lstr.2, %edi
callq puts@PLT
movss (%r15), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss (%r12), %xmm1 # xmm1 = mem[0],zero,zero,zero
ucomiss %xmm1, %xmm0
movl $0, %r12d
jne .LBB3_15
jp .LBB3_15
# %bb.11: # %.lr.ph.preheader
xorl %r13d, %r13d
movq 16(%rsp), %rax # 8-byte Reload
.p2align 4, 0x90
.LBB3_12: # %.lr.ph
# =>This Inner Loop Header: Depth=1
cmpq $1023, %r13 # imm = 0x3FF
je .LBB3_16
# %bb.13: # in Loop: Header=BB3_12 Depth=1
movss 4(%r15,%r13,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss 4(%rax,%r13,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
incq %r13
ucomiss %xmm1, %xmm0
jne .LBB3_14
jnp .LBB3_12
.LBB3_14: # %._crit_edge.loopexit
leaq -1(%r13), %rax
cmpq $1023, %rax # imm = 0x3FF
setae %r12b
.LBB3_15: # %._crit_edge
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
movl $.L.str, %edi
movl %r13d, %esi
movb $2, %al
callq printf
.LBB3_17: # %_Z7comparePfS_.exit
subq (%rsp), %rbp # 8-byte Folded Reload
xorps %xmm1, %xmm1
cvtsi2sd %rbp, %xmm1
movsd .LCPI3_0(%rip), %xmm0 # xmm0 = mem[0],zero
divsd %xmm0, %xmm1
movsd %xmm1, (%rsp) # 8-byte Spill
movq 48(%rsp), %rax # 8-byte Reload
subq 8(%rsp), %rax # 8-byte Folded Reload
xorps %xmm1, %xmm1
cvtsi2sd %rax, %xmm1
divsd %xmm0, %xmm1
movsd %xmm1, 8(%rsp) # 8-byte Spill
movl $.L.str.6, %eax
movl $.L.str.7, %esi
testb %r12b, %r12b
cmovneq %rax, %rsi
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
movl $.L.str.8, %edi
movsd 8(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movb $1, %al
callq printf
movl $.L.str.9, %edi
movsd (%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movb $1, %al
callq printf
movq 40(%rsp), %rdi
callq hipFree
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
movq 16(%rsp), %rdi # 8-byte Reload
callq free
xorl %eax, %eax
addq $152, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB3_16: # %_Z7comparePfS_.exit.loopexit
.cfi_def_cfa_offset 208
setae %r12b
jmp .LBB3_17
.LBB3_18:
movl $.Lstr.3, %edi
callq puts@PLT
movl $1, %edi
callq exit
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7dMatAddPfS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z7dMatAddPfS_S_,@object # @_Z7dMatAddPfS_S_
.section .rodata,"a",@progbits
.globl _Z7dMatAddPfS_S_
.p2align 3, 0x0
_Z7dMatAddPfS_S_:
.quad _Z22__device_stub__dMatAddPfS_S_
.size _Z7dMatAddPfS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Elements are not equal. Index %d\t\tA: %f\t\tB:%f\n"
.size .L.str, 47
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Vectors are equal?: %s\n"
.size .L.str.5, 24
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "true"
.size .L.str.6, 5
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "false"
.size .L.str.7, 6
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "CPU: %f seconds\n"
.size .L.str.8, 17
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "GPU: %f seconds\n"
.size .L.str.9, 17
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z7dMatAddPfS_S_"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Running Host function"
.size .Lstr, 22
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Running Device function"
.size .Lstr.1, 24
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "Running Compare function"
.size .Lstr.2, 25
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "Malloc error!"
.size .Lstr.3, 14
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__dMatAddPfS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z7dMatAddPfS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z7dMatAddPfS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*0040*/ UMOV UR5, 0x989680 ; /* 0x0098968000057882 */
/* 0x000fe40000000000 */
/*0050*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fca0008000000 */
/*0060*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0207 */
/*0070*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*0080*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0207 */
/*0090*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000ea8000c1e1900 */
/*00a0*/ LDG.E R9, [R2.64] ; /* 0x0000000602097981 */
/* 0x001ea4000c1e1900 */
/*00b0*/ FADD R9, R0, R9 ; /* 0x0000000900097221 */
/* 0x004fca0000000000 */
/*00c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0001e8000c101906 */
/*00d0*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000ea8000c1e1900 */
/*00e0*/ LDG.E R11, [R2.64] ; /* 0x00000006020b7981 */
/* 0x000ea4000c1e1900 */
/*00f0*/ FADD R11, R0, R11 ; /* 0x0000000b000b7221 */
/* 0x004fca0000000000 */
/*0100*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */
/* 0x0003e8000c101906 */
/*0110*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000ea8000c1e1900 */
/*0120*/ LDG.E R13, [R2.64] ; /* 0x00000006020d7981 */
/* 0x000ea4000c1e1900 */
/*0130*/ FADD R13, R0, R13 ; /* 0x0000000d000d7221 */
/* 0x004fca0000000000 */
/*0140*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */
/* 0x0005e8000c101906 */
/*0150*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000ee8000c1e1900 */
/*0160*/ LDG.E R15, [R2.64] ; /* 0x00000006020f7981 */
/* 0x000ee4000c1e1900 */
/*0170*/ FADD R15, R0, R15 ; /* 0x0000000f000f7221 */
/* 0x008fca0000000000 */
/*0180*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */
/* 0x0007e8000c101906 */
/*0190*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*01a0*/ LDG.E R9, [R2.64] ; /* 0x0000000602097981 */
/* 0x001f24000c1e1900 */
/*01b0*/ FADD R9, R0, R9 ; /* 0x0000000900097221 */
/* 0x010fca0000000000 */
/*01c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0001e8000c101906 */
/*01d0*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*01e0*/ LDG.E R11, [R2.64] ; /* 0x00000006020b7981 */
/* 0x002f24000c1e1900 */
/*01f0*/ FADD R11, R0, R11 ; /* 0x0000000b000b7221 */
/* 0x010fca0000000000 */
/*0200*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */
/* 0x0003e8000c101906 */
/*0210*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*0220*/ LDG.E R13, [R2.64] ; /* 0x00000006020d7981 */
/* 0x004f24000c1e1900 */
/*0230*/ FADD R13, R0, R13 ; /* 0x0000000d000d7221 */
/* 0x010fca0000000000 */
/*0240*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */
/* 0x0005e8000c101906 */
/*0250*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*0260*/ LDG.E R15, [R2.64] ; /* 0x00000006020f7981 */
/* 0x008f24000c1e1900 */
/*0270*/ FADD R15, R0, R15 ; /* 0x0000000f000f7221 */
/* 0x010fca0000000000 */
/*0280*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */
/* 0x0007e8000c101906 */
/*0290*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*02a0*/ LDG.E R9, [R2.64] ; /* 0x0000000602097981 */
/* 0x001f24000c1e1900 */
/*02b0*/ FADD R9, R0, R9 ; /* 0x0000000900097221 */
/* 0x010fca0000000000 */
/*02c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0001e8000c101906 */
/*02d0*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*02e0*/ LDG.E R11, [R2.64] ; /* 0x00000006020b7981 */
/* 0x002f24000c1e1900 */
/*02f0*/ FADD R11, R0, R11 ; /* 0x0000000b000b7221 */
/* 0x010fca0000000000 */
/*0300*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */
/* 0x0003e8000c101906 */
/*0310*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*0320*/ LDG.E R13, [R2.64] ; /* 0x00000006020d7981 */
/* 0x004f24000c1e1900 */
/*0330*/ FADD R13, R0, R13 ; /* 0x0000000d000d7221 */
/* 0x010fca0000000000 */
/*0340*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */
/* 0x0005e8000c101906 */
/*0350*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*0360*/ LDG.E R15, [R2.64] ; /* 0x00000006020f7981 */
/* 0x008f24000c1e1900 */
/*0370*/ FADD R15, R0, R15 ; /* 0x0000000f000f7221 */
/* 0x010fca0000000000 */
/*0380*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */
/* 0x0007e8000c101906 */
/*0390*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*03a0*/ LDG.E R9, [R2.64] ; /* 0x0000000602097981 */
/* 0x001f24000c1e1900 */
/*03b0*/ FADD R9, R0, R9 ; /* 0x0000000900097221 */
/* 0x010fca0000000000 */
/*03c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0001e8000c101906 */
/*03d0*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*03e0*/ LDG.E R11, [R2.64] ; /* 0x00000006020b7981 */
/* 0x002f24000c1e1900 */
/*03f0*/ FADD R11, R0, R11 ; /* 0x0000000b000b7221 */
/* 0x010fca0000000000 */
/*0400*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */
/* 0x0003e8000c101906 */
/*0410*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*0420*/ LDG.E R13, [R2.64] ; /* 0x00000006020d7981 */
/* 0x004f24000c1e1900 */
/*0430*/ FADD R13, R0, R13 ; /* 0x0000000d000d7221 */
/* 0x010fca0000000000 */
/*0440*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */
/* 0x0005e8000c101906 */
/*0450*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*0460*/ LDG.E R15, [R2.64] ; /* 0x00000006020f7981 */
/* 0x008f24000c1e1900 */
/*0470*/ FADD R15, R0, R15 ; /* 0x0000000f000f7221 */
/* 0x010fca0000000000 */
/*0480*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */
/* 0x0007e8000c101906 */
/*0490*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*04a0*/ LDG.E R9, [R2.64] ; /* 0x0000000602097981 */
/* 0x001f24000c1e1900 */
/*04b0*/ FADD R9, R0, R9 ; /* 0x0000000900097221 */
/* 0x010fca0000000000 */
/*04c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0001e8000c101906 */
/*04d0*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*04e0*/ LDG.E R11, [R2.64] ; /* 0x00000006020b7981 */
/* 0x002f24000c1e1900 */
/*04f0*/ FADD R11, R0, R11 ; /* 0x0000000b000b7221 */
/* 0x010fca0000000000 */
/*0500*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */
/* 0x0003e8000c101906 */
/*0510*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*0520*/ LDG.E R13, [R2.64] ; /* 0x00000006020d7981 */
/* 0x004f24000c1e1900 */
/*0530*/ FADD R13, R0, R13 ; /* 0x0000000d000d7221 */
/* 0x010fca0000000000 */
/*0540*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */
/* 0x0005e8000c101906 */
/*0550*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*0560*/ LDG.E R15, [R2.64] ; /* 0x00000006020f7981 */
/* 0x008f24000c1e1900 */
/*0570*/ FADD R15, R0, R15 ; /* 0x0000000f000f7221 */
/* 0x010fca0000000000 */
/*0580*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */
/* 0x0007e8000c101906 */
/*0590*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*05a0*/ LDG.E R9, [R2.64] ; /* 0x0000000602097981 */
/* 0x001f24000c1e1900 */
/*05b0*/ FADD R9, R0, R9 ; /* 0x0000000900097221 */
/* 0x010fca0000000000 */
/*05c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0001e8000c101906 */
/*05d0*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*05e0*/ LDG.E R11, [R2.64] ; /* 0x00000006020b7981 */
/* 0x002f24000c1e1900 */
/*05f0*/ FADD R11, R0, R11 ; /* 0x0000000b000b7221 */
/* 0x010fca0000000000 */
/*0600*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */
/* 0x0003e8000c101906 */
/*0610*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*0620*/ LDG.E R13, [R2.64] ; /* 0x00000006020d7981 */
/* 0x004f24000c1e1900 */
/*0630*/ FADD R13, R0, R13 ; /* 0x0000000d000d7221 */
/* 0x010fca0000000000 */
/*0640*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */
/* 0x0005e8000c101906 */
/*0650*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*0660*/ LDG.E R15, [R2.64] ; /* 0x00000006020f7981 */
/* 0x008f24000c1e1900 */
/*0670*/ FADD R15, R0, R15 ; /* 0x0000000f000f7221 */
/* 0x010fca0000000000 */
/*0680*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */
/* 0x0007e8000c101906 */
/*0690*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*06a0*/ LDG.E R9, [R2.64] ; /* 0x0000000602097981 */
/* 0x001f24000c1e1900 */
/*06b0*/ FADD R9, R0, R9 ; /* 0x0000000900097221 */
/* 0x010fca0000000000 */
/*06c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0001e8000c101906 */
/*06d0*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*06e0*/ LDG.E R11, [R2.64] ; /* 0x00000006020b7981 */
/* 0x002f24000c1e1900 */
/*06f0*/ FADD R11, R0, R11 ; /* 0x0000000b000b7221 */
/* 0x010fca0000000000 */
/*0700*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */
/* 0x0003e8000c101906 */
/*0710*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*0720*/ LDG.E R13, [R2.64] ; /* 0x00000006020d7981 */
/* 0x004f24000c1e1900 */
/*0730*/ FADD R13, R0, R13 ; /* 0x0000000d000d7221 */
/* 0x010fca0000000000 */
/*0740*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */
/* 0x0005e8000c101906 */
/*0750*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*0760*/ LDG.E R15, [R2.64] ; /* 0x00000006020f7981 */
/* 0x008f24000c1e1900 */
/*0770*/ FADD R15, R0, R15 ; /* 0x0000000f000f7221 */
/* 0x010fca0000000000 */
/*0780*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */
/* 0x0007e8000c101906 */
/*0790*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*07a0*/ LDG.E R9, [R2.64] ; /* 0x0000000602097981 */
/* 0x001f24000c1e1900 */
/*07b0*/ FADD R9, R0, R9 ; /* 0x0000000900097221 */
/* 0x010fca0000000000 */
/*07c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0001e8000c101906 */
/*07d0*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*07e0*/ LDG.E R11, [R2.64] ; /* 0x00000006020b7981 */
/* 0x002f24000c1e1900 */
/*07f0*/ FADD R11, R0, R11 ; /* 0x0000000b000b7221 */
/* 0x010fca0000000000 */
/*0800*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */
/* 0x0001e8000c101906 */
/*0810*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000f28000c1e1900 */
/*0820*/ LDG.E R13, [R2.64] ; /* 0x00000006020d7981 */
/* 0x004f22000c1e1900 */
/*0830*/ UIADD3 UR5, UP0, UR5, -0x20, URZ ; /* 0xffffffe005057890 */
/* 0x000fc8000ff1e03f */
/*0840*/ UIADD3.X UR4, UR4, -0x1, URZ, UP0, !UPT ; /* 0xffffffff04047890 */
/* 0x000fe200087fe43f */
/*0850*/ FADD R13, R0, R13 ; /* 0x0000000d000d7221 */
/* 0x010fca0000000000 */
/*0860*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */
/* 0x0001e8000c101906 */
/*0870*/ LDG.E R0, [R4.64] ; /* 0x0000000604007981 */
/* 0x000ea8000c1e1900 */
/*0880*/ LDG.E R15, [R2.64] ; /* 0x00000006020f7981 */
/* 0x008ea2000c1e1900 */
/*0890*/ ISETP.NE.U32.AND P0, PT, RZ, UR5, PT ; /* 0x00000005ff007c0c */
/* 0x000fc8000bf05070 */
/*08a0*/ ISETP.NE.AND.EX P0, PT, RZ, UR4, PT, P0 ; /* 0x00000004ff007c0c */
/* 0x000fe2000bf05300 */
/*08b0*/ FADD R15, R0, R15 ; /* 0x0000000f000f7221 */
/* 0x004fca0000000000 */
/*08c0*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */
/* 0x0001ee000c101906 */
/*08d0*/ @P0 BRA 0x90 ; /* 0xfffff7b000000947 */
/* 0x000fea000383ffff */
/*08e0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*08f0*/ BRA 0x8f0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0900*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0910*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0920*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0930*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0940*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0950*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0960*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0970*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z7dMatAddPfS_S_
.globl _Z7dMatAddPfS_S_
.p2align 8
.type _Z7dMatAddPfS_S_,@function
_Z7dMatAddPfS_S_:
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x10
v_lshlrev_b32_e32 v4, 2, v0
s_mov_b64 s[0:1], 0x989680
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v0, s4, s4, v4
v_add_co_ci_u32_e64 v1, null, s5, 0, s4
v_add_co_u32 v2, s4, s6, v4
v_add_co_u32 v4, s2, s2, v4
v_add_co_ci_u32_e64 v3, null, s7, 0, s4
v_add_co_ci_u32_e64 v5, null, s3, 0, s2
.LBB0_1:
global_load_b32 v6, v[0:1], off
global_load_b32 v7, v[2:3], off
s_add_u32 s0, s0, -1
s_addc_u32 s1, s1, -1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_eq_u64 s[0:1], 0
s_waitcnt vmcnt(0)
v_add_f32_e32 v6, v6, v7
global_store_b32 v[4:5], v6, off
s_cbranch_scc0 .LBB0_1
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z7dMatAddPfS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z7dMatAddPfS_S_, .Lfunc_end0-_Z7dMatAddPfS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z7dMatAddPfS_S_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z7dMatAddPfS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00065296_00000000-6_00.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2062:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2062:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z7hMatAddPfS_S_
.type _Z7hMatAddPfS_S_, @function
_Z7hMatAddPfS_S_:
.LFB2057:
.cfi_startproc
endbr64
movq %rdx, %rcx
movq %rdi, %rdx
addq $4096, %rdi
.L4:
movl $10000000, %eax
.L5:
movss (%rdx), %xmm0
addss (%rsi), %xmm0
movss %xmm0, (%rcx)
subq $1, %rax
jne .L5
addq $4, %rdx
addq $4, %rsi
addq $4, %rcx
cmpq %rdi, %rdx
jne .L4
ret
.cfi_endproc
.LFE2057:
.size _Z7hMatAddPfS_S_, .-_Z7hMatAddPfS_S_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "Elements are not equal. Index %d\t\tA: %f\t\tB:%f\n"
.text
.globl _Z7comparePfS_
.type _Z7comparePfS_, @function
_Z7comparePfS_:
.LFB2058:
.cfi_startproc
endbr64
movl $0, %edx
.L12:
movss (%rdi,%rdx,4), %xmm0
movss (%rsi,%rdx,4), %xmm1
ucomiss %xmm1, %xmm0
jp .L13
jne .L13
addq $1, %rdx
cmpq $1024, %rdx
jne .L12
movl $1, %eax
ret
.L13:
subq $8, %rsp
.cfi_def_cfa_offset 16
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
movl $0, %eax
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size _Z7comparePfS_, .-_Z7comparePfS_
.globl _Z30__device_stub__Z7dMatAddPfS_S_PfS_S_
.type _Z30__device_stub__Z7dMatAddPfS_S_PfS_S_, @function
_Z30__device_stub__Z7dMatAddPfS_S_PfS_S_:
.LFB2084:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L22
.L18:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L23
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L22:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z7dMatAddPfS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L18
.L23:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2084:
.size _Z30__device_stub__Z7dMatAddPfS_S_PfS_S_, .-_Z30__device_stub__Z7dMatAddPfS_S_PfS_S_
.globl _Z7dMatAddPfS_S_
.type _Z7dMatAddPfS_S_, @function
_Z7dMatAddPfS_S_:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z7dMatAddPfS_S_PfS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _Z7dMatAddPfS_S_, .-_Z7dMatAddPfS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "true"
.LC2:
.string "false"
.LC3:
.string "Malloc error!\n"
.LC4:
.string "Running Host function\n"
.LC5:
.string "Running Device function\n"
.LC7:
.string "Running Compare function\n"
.LC8:
.string "Vectors are equal?: %s\n"
.LC9:
.string "CPU: %f seconds\n"
.LC10:
.string "GPU: %f seconds\n"
.text
.globl main
.type main, @function
main:
.LFB2059:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $88, %rsp
.cfi_def_cfa_offset 144
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $4096, %edi
call malloc@PLT
movq %rax, %rbp
movl $4096, %edi
call malloc@PLT
movq %rax, %rbx
movl $4096, %edi
call malloc@PLT
movq %rax, %r13
movl $4096, %edi
call malloc@PLT
testq %rbp, %rbp
je .L27
movq %rax, %r12
testq %rbx, %rbx
je .L27
testq %r13, %r13
je .L27
testq %rax, %rax
je .L27
leaq 24(%rsp), %rdi
movl $4096, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $4096, %esi
call cudaMalloc@PLT
leaq 40(%rsp), %rdi
movl $4096, %esi
call cudaMalloc@PLT
movl $0, %eax
movl $1024, %ecx
.L30:
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, 0(%rbp,%rax,4)
movl %ecx, %edx
subl %eax, %edx
pxor %xmm0, %xmm0
cvtsi2ssl %edx, %xmm0
movss %xmm0, (%rbx,%rax,4)
addq $1, %rax
cmpq $1024, %rax
jne .L30
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call clock@PLT
movq %rax, %r15
movq %r13, %rdx
movq %rbx, %rsi
movq %rbp, %rdi
call _Z7hMatAddPfS_S_
call clock@PLT
movq %rax, 8(%rsp)
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call clock@PLT
movq %rax, %r14
movl $1, %ecx
movl $4096, %edx
movq %rbp, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $4096, %edx
movq %rbx, %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
movl $1024, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L37
.L31:
movl $2, %ecx
movl $4096, %edx
movq 40(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
call clock@PLT
movq 8(%rsp), %rdx
subq %r15, %rdx
pxor %xmm1, %xmm1
cvtsi2sdq %rdx, %xmm1
movsd .LC6(%rip), %xmm0
divsd %xmm0, %xmm1
movq %xmm1, %r15
subq %r14, %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
divsd %xmm0, %xmm1
movq %xmm1, %r14
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %r12, %rsi
movq %r13, %rdi
call _Z7comparePfS_
testb %al, %al
leaq .LC2(%rip), %rdx
leaq .LC1(%rip), %rax
cmovne %rax, %rdx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %r15, %xmm0
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq %r14, %xmm0
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L38
movl $0, %eax
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L27:
.cfi_restore_state
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L37:
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z30__device_stub__Z7dMatAddPfS_S_PfS_S_
jmp .L31
.L38:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2059:
.size main, .-main
.section .rodata.str1.1
.LC11:
.string "_Z7dMatAddPfS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2087:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _Z7dMatAddPfS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC6:
.long 0
.long 1093567616
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "00.hip"
.globl _Z22__device_stub__dMatAddPfS_S_ # -- Begin function _Z22__device_stub__dMatAddPfS_S_
.p2align 4, 0x90
.type _Z22__device_stub__dMatAddPfS_S_,@function
_Z22__device_stub__dMatAddPfS_S_: # @_Z22__device_stub__dMatAddPfS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z7dMatAddPfS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z22__device_stub__dMatAddPfS_S_, .Lfunc_end0-_Z22__device_stub__dMatAddPfS_S_
.cfi_endproc
# -- End function
.globl _Z7hMatAddPfS_S_ # -- Begin function _Z7hMatAddPfS_S_
.p2align 4, 0x90
.type _Z7hMatAddPfS_S_,@function
_Z7hMatAddPfS_S_: # @_Z7hMatAddPfS_S_
.cfi_startproc
# %bb.0:
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_2 Depth 2
movl $10000000, %ecx # imm = 0x989680
.p2align 4, 0x90
.LBB1_2: # Parent Loop BB1_1 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rdi,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
addss (%rsi,%rax,4), %xmm0
movss %xmm0, (%rdx,%rax,4)
decq %rcx
jne .LBB1_2
# %bb.3: # in Loop: Header=BB1_1 Depth=1
incq %rax
cmpq $1024, %rax # imm = 0x400
jne .LBB1_1
# %bb.4:
retq
.Lfunc_end1:
.size _Z7hMatAddPfS_S_, .Lfunc_end1-_Z7hMatAddPfS_S_
.cfi_endproc
# -- End function
.globl _Z7comparePfS_ # -- Begin function _Z7comparePfS_
.p2align 4, 0x90
.type _Z7comparePfS_,@function
_Z7comparePfS_: # @_Z7comparePfS_
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
movq %rsi, %rax
movss (%rdi), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss (%rsi), %xmm1 # xmm1 = mem[0],zero,zero,zero
xorl %esi, %esi
ucomiss %xmm1, %xmm0
movl $0, %ebx
jne .LBB2_5
jp .LBB2_5
# %bb.1: # %.lr.ph.preheader
xorl %esi, %esi
.p2align 4, 0x90
.LBB2_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
cmpq $1023, %rsi # imm = 0x3FF
je .LBB2_6
# %bb.3: # in Loop: Header=BB2_2 Depth=1
movss 4(%rdi,%rsi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss 4(%rax,%rsi,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
incq %rsi
ucomiss %xmm1, %xmm0
jne .LBB2_4
jnp .LBB2_2
.LBB2_4: # %._crit_edge
leaq -1(%rsi), %rax
cmpq $1023, %rax # imm = 0x3FF
setae %bl
.LBB2_5:
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
movl $.L.str, %edi
# kill: def $esi killed $esi killed $rsi
movb $2, %al
callq printf
.LBB2_7: # %.loopexit
movl %ebx, %eax
popq %rbx
.cfi_def_cfa_offset 8
retq
.LBB2_6: # %.loopexit.loopexit
.cfi_def_cfa_offset 16
setae %bl
jmp .LBB2_7
.Lfunc_end2:
.size _Z7comparePfS_, .Lfunc_end2-_Z7comparePfS_
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI3_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $152, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $4096, %edi # imm = 0x1000
callq malloc
movq %rax, %rbx
movl $4096, %edi # imm = 0x1000
callq malloc
movq %rax, %r14
movl $4096, %edi # imm = 0x1000
callq malloc
movq %rax, %r15
movl $4096, %edi # imm = 0x1000
callq malloc
testq %rbx, %rbx
je .LBB3_18
# %bb.1:
testq %r14, %r14
je .LBB3_18
# %bb.2:
testq %r15, %r15
je .LBB3_18
# %bb.3:
testq %rax, %rax
je .LBB3_18
# %bb.4:
movq %rax, 16(%rsp) # 8-byte Spill
leaq 40(%rsp), %rdi
movl $4096, %esi # imm = 0x1000
callq hipMalloc
leaq 32(%rsp), %rdi
movl $4096, %esi # imm = 0x1000
callq hipMalloc
leaq 24(%rsp), %rdi
movl $4096, %esi # imm = 0x1000
callq hipMalloc
movl $1024, %eax # imm = 0x400
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB3_5: # =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2ss %ecx, %xmm0
movss %xmm0, (%rbx,%rcx,4)
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%r14,%rcx,4)
incq %rcx
decl %eax
cmpq $1024, %rcx # imm = 0x400
jne .LBB3_5
# %bb.6:
movl $.Lstr, %edi
callq puts@PLT
xorl %r12d, %r12d
callq clock
movq %rax, 8(%rsp) # 8-byte Spill
.p2align 4, 0x90
.LBB3_7: # %.preheader.i
# =>This Inner Loop Header: Depth=1
movss (%rbx,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
addss (%r14,%r12,4), %xmm0
movss %xmm0, (%r15,%r12,4)
incq %r12
cmpq $1024, %r12 # imm = 0x400
jne .LBB3_7
# %bb.8: # %_Z7hMatAddPfS_S_.exit
callq clock
movq %rax, 48(%rsp) # 8-byte Spill
movl $.Lstr.1, %edi
callq puts@PLT
callq clock
movq %rax, (%rsp) # 8-byte Spill
movq 40(%rsp), %rdi
movl $4096, %edx # imm = 0x1000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 32(%rsp), %rdi
movl $4096, %edx # imm = 0x1000
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 1023(%rdi), %rdx
xorl %r13d, %r13d
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_10
# %bb.9:
movq 40(%rsp), %rax
movq 32(%rsp), %rcx
movq 24(%rsp), %rdx
movq %rax, 120(%rsp)
movq %rcx, 112(%rsp)
movq %rdx, 104(%rsp)
leaq 120(%rsp), %rax
movq %rax, 128(%rsp)
leaq 112(%rsp), %rax
movq %rax, 136(%rsp)
leaq 104(%rsp), %rax
movq %rax, 144(%rsp)
leaq 88(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 64(%rsp), %rdx
leaq 56(%rsp), %rcx
callq __hipPopCallConfiguration
movq 88(%rsp), %rsi
movl 96(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z7dMatAddPfS_S_, %edi
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_10:
movq 24(%rsp), %rsi
movl $4096, %edx # imm = 0x1000
movq 16(%rsp), %r12 # 8-byte Reload
movq %r12, %rdi
movl $2, %ecx
callq hipMemcpy
callq clock
movq %rax, %rbp
movl $.Lstr.2, %edi
callq puts@PLT
movss (%r15), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss (%r12), %xmm1 # xmm1 = mem[0],zero,zero,zero
ucomiss %xmm1, %xmm0
movl $0, %r12d
jne .LBB3_15
jp .LBB3_15
# %bb.11: # %.lr.ph.preheader
xorl %r13d, %r13d
movq 16(%rsp), %rax # 8-byte Reload
.p2align 4, 0x90
.LBB3_12: # %.lr.ph
# =>This Inner Loop Header: Depth=1
cmpq $1023, %r13 # imm = 0x3FF
je .LBB3_16
# %bb.13: # in Loop: Header=BB3_12 Depth=1
movss 4(%r15,%r13,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss 4(%rax,%r13,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
incq %r13
ucomiss %xmm1, %xmm0
jne .LBB3_14
jnp .LBB3_12
.LBB3_14: # %._crit_edge.loopexit
leaq -1(%r13), %rax
cmpq $1023, %rax # imm = 0x3FF
setae %r12b
.LBB3_15: # %._crit_edge
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
movl $.L.str, %edi
movl %r13d, %esi
movb $2, %al
callq printf
.LBB3_17: # %_Z7comparePfS_.exit
subq (%rsp), %rbp # 8-byte Folded Reload
xorps %xmm1, %xmm1
cvtsi2sd %rbp, %xmm1
movsd .LCPI3_0(%rip), %xmm0 # xmm0 = mem[0],zero
divsd %xmm0, %xmm1
movsd %xmm1, (%rsp) # 8-byte Spill
movq 48(%rsp), %rax # 8-byte Reload
subq 8(%rsp), %rax # 8-byte Folded Reload
xorps %xmm1, %xmm1
cvtsi2sd %rax, %xmm1
divsd %xmm0, %xmm1
movsd %xmm1, 8(%rsp) # 8-byte Spill
movl $.L.str.6, %eax
movl $.L.str.7, %esi
testb %r12b, %r12b
cmovneq %rax, %rsi
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
movl $.L.str.8, %edi
movsd 8(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movb $1, %al
callq printf
movl $.L.str.9, %edi
movsd (%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movb $1, %al
callq printf
movq 40(%rsp), %rdi
callq hipFree
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
movq 16(%rsp), %rdi # 8-byte Reload
callq free
xorl %eax, %eax
addq $152, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB3_16: # %_Z7comparePfS_.exit.loopexit
.cfi_def_cfa_offset 208
setae %r12b
jmp .LBB3_17
.LBB3_18:
movl $.Lstr.3, %edi
callq puts@PLT
movl $1, %edi
callq exit
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7dMatAddPfS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z7dMatAddPfS_S_,@object # @_Z7dMatAddPfS_S_
.section .rodata,"a",@progbits
.globl _Z7dMatAddPfS_S_
.p2align 3, 0x0
_Z7dMatAddPfS_S_:
.quad _Z22__device_stub__dMatAddPfS_S_
.size _Z7dMatAddPfS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Elements are not equal. Index %d\t\tA: %f\t\tB:%f\n"
.size .L.str, 47
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Vectors are equal?: %s\n"
.size .L.str.5, 24
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "true"
.size .L.str.6, 5
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "false"
.size .L.str.7, 6
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "CPU: %f seconds\n"
.size .L.str.8, 17
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "GPU: %f seconds\n"
.size .L.str.9, 17
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z7dMatAddPfS_S_"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Running Host function"
.size .Lstr, 22
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Running Device function"
.size .Lstr.1, 24
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "Running Compare function"
.size .Lstr.2, 25
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "Malloc error!"
.size .Lstr.3, 14
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__dMatAddPfS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z7dMatAddPfS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void StarRadKernel (double *Qbase2, double *Vrad, double *QStar, double dt, int nrad, int nsec, double *invdiffRmed, double *Rmed, double *dq)
{
int j = threadIdx.x + blockDim.x*blockIdx.x;
int i = threadIdx.y + blockDim.y*blockIdx.y;
double dqm, dqp;
if (i<nrad && j<nsec){
if ((i == 0 || i == nrad-1)) dq[i + j*nrad] = 0.0;
else {
dqm = (Qbase2[i*nsec + j] - Qbase2[(i-1)*nsec + j])*invdiffRmed[i];
dqp = (Qbase2[(i+1)*nsec + j] - Qbase2[i*nsec + j])*invdiffRmed[i+1];
if (dqp * dqm > 0.0)
dq[i+j*nrad] = 2.0*dqp*dqm/(dqp+dqm);
else
dq[i+j*nrad] = 0.0;
}
}
} | code for sm_80
Function : _Z13StarRadKernelPdS_S_diiS_S_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e280000002100 */
/*0030*/ S2R R11, SR_CTAID.Y ; /* 0x00000000000b7919 */
/* 0x000e680000002600 */
/*0040*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e620000002200 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x184], PT ; /* 0x0000610000007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R11, R11, c[0x0][0x4], R2 ; /* 0x000001000b0b7a24 */
/* 0x002fca00078e0202 */
/*0080*/ ISETP.GE.OR P0, PT, R11, c[0x0][0x180], P0 ; /* 0x000060000b007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ ULDC UR4, c[0x0][0x180] ; /* 0x0000600000047ab9 */
/* 0x000fe20000000800 */
/*00b0*/ IMAD.MOV.U32 R9, RZ, RZ, 0x8 ; /* 0x00000008ff097424 */
/* 0x000fe200078e00ff */
/*00c0*/ UIADD3 UR4, UR4, -0x1, URZ ; /* 0xffffffff04047890 */
/* 0x000fe2000fffe03f */
/*00d0*/ IMAD R2, R0, c[0x0][0x180], R11 ; /* 0x0000600000027a24 */
/* 0x000fc800078e020b */
/*00e0*/ IMAD.WIDE R2, R2, R9, c[0x0][0x198] ; /* 0x0000660002027625 */
/* 0x000fe200078e0209 */
/*00f0*/ ISETP.NE.AND P0, PT, R11, UR4, PT ; /* 0x000000040b007c0c */
/* 0x000fe2000bf05270 */
/*0100*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0110*/ ISETP.EQ.OR P0, PT, R11, RZ, !P0 ; /* 0x000000ff0b00720c */
/* 0x000fda0004702670 */
/*0120*/ @P0 BRA 0x420 ; /* 0x000002f000000947 */
/* 0x000fea0003800000 */
/*0130*/ IMAD.MOV.U32 R14, RZ, RZ, c[0x0][0x184] ; /* 0x00006100ff0e7624 */
/* 0x000fe400078e00ff */
/*0140*/ IMAD R4, R11.reuse, c[0x0][0x184], R0 ; /* 0x000061000b047a24 */
/* 0x040fe400078e0200 */
/*0150*/ IMAD R5, R11.reuse, R14, -c[0x0][0x184] ; /* 0x800061000b057624 */
/* 0x040fe400078e020e */
/*0160*/ IMAD.WIDE R10, R11, R9, c[0x0][0x188] ; /* 0x000062000b0a7625 */
/* 0x000fc800078e0209 */
/*0170*/ IMAD.IADD R6, R0, 0x1, R5 ; /* 0x0000000100067824 */
/* 0x000fe200078e0205 */
/*0180*/ LDG.E.64 R12, [R10.64] ; /* 0x000000040a0c7981 */
/* 0x000ea2000c1e1b00 */
/*0190*/ IMAD.WIDE R4, R4, R9, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fc600078e0209 */
/*01a0*/ LDG.E.64 R18, [R10.64+0x8] ; /* 0x000008040a127981 */
/* 0x000ee2000c1e1b00 */
/*01b0*/ IMAD R14, R14, 0x2, R6 ; /* 0x000000020e0e7824 */
/* 0x000fe400078e0206 */
/*01c0*/ IMAD.WIDE R6, R6, R9.reuse, c[0x0][0x160] ; /* 0x0000580006067625 */
/* 0x080fe200078e0209 */
/*01d0*/ LDG.E.64 R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000f26000c1e1b00 */
/*01e0*/ IMAD.WIDE R14, R14, R9, c[0x0][0x160] ; /* 0x000058000e0e7625 */
/* 0x000fe400078e0209 */
/*01f0*/ LDG.E.64 R6, [R6.64] ; /* 0x0000000406067981 */
/* 0x000f28000c1e1b00 */
/*0200*/ LDG.E.64 R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000f62000c1e1b00 */
/*0210*/ DADD R8, -R6, R4 ; /* 0x0000000006087229 */
/* 0x010e880000000104 */
/*0220*/ DADD R16, -R4, R14 ; /* 0x0000000004107229 */
/* 0x020ec8000000010e */
/*0230*/ DMUL R12, R8, R12 ; /* 0x0000000c080c7228 */
/* 0x004fc80000000000 */
/*0240*/ DMUL R16, R16, R18 ; /* 0x0000001210107228 */
/* 0x008e0c0000000000 */
/*0250*/ DMUL R8, R12, R16 ; /* 0x000000100c087228 */
/* 0x001e0c0000000000 */
/*0260*/ DSETP.GT.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800722a */
/* 0x001e1c0003f04000 */
/*0270*/ @!P0 STG.E.64 [R2.64], RZ ; /* 0x000000ff02008986 */
/* 0x0011e2000c101b04 */
/*0280*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0290*/ DADD R8, R12, R16.reuse ; /* 0x000000000c087229 */
/* 0x100e620000000010 */
/*02a0*/ IMAD.MOV.U32 R4, RZ, RZ, 0x1 ; /* 0x00000001ff047424 */
/* 0x000fe200078e00ff */
/*02b0*/ BSSY B0, 0x400 ; /* 0x0000014000007945 */
/* 0x000fe40003800000 */
/*02c0*/ DADD R16, R16, R16 ; /* 0x0000000010107229 */
/* 0x000ea40000000010 */
/*02d0*/ MUFU.RCP64H R5, R9 ; /* 0x0000000900057308 */
/* 0x002e680000001800 */
/*02e0*/ DMUL R16, R12, R16 ; /* 0x000000100c107228 */
/* 0x004e940000000000 */
/*02f0*/ FSETP.GEU.AND P1, PT, |R17|, 6.5827683646048100446e-37, PT ; /* 0x036000001100780b */
/* 0x004fe20003f2e200 */
/*0300*/ DFMA R6, -R8, R4, 1 ; /* 0x3ff000000806742b */
/* 0x002e4c0000000104 */
/*0310*/ DFMA R6, R6, R6, R6 ; /* 0x000000060606722b */
/* 0x002e4c0000000006 */
/*0320*/ DFMA R6, R4, R6, R4 ; /* 0x000000060406722b */
/* 0x002e4c0000000004 */
/*0330*/ DFMA R4, -R8, R6, 1 ; /* 0x3ff000000804742b */
/* 0x002e4c0000000106 */
/*0340*/ DFMA R4, R6, R4, R6 ; /* 0x000000040604722b */
/* 0x002e4c0000000006 */
/*0350*/ DMUL R6, R16, R4 ; /* 0x0000000410067228 */
/* 0x002e4c0000000000 */
/*0360*/ DFMA R10, -R8, R6, R16 ; /* 0x00000006080a722b */
/* 0x002e4c0000000110 */
/*0370*/ DFMA R4, R4, R10, R6 ; /* 0x0000000a0404722b */
/* 0x002e540000000006 */
/*0380*/ FFMA R0, RZ, R9, R5 ; /* 0x00000009ff007223 */
/* 0x002fca0000000005 */
/*0390*/ FSETP.GT.AND P0, PT, |R0|, 1.469367938527859385e-39, PT ; /* 0x001000000000780b */
/* 0x000fda0003f04200 */
/*03a0*/ @P0 BRA P1, 0x3f0 ; /* 0x0000004000000947 */
/* 0x000fea0000800000 */
/*03b0*/ MOV R0, 0x3d0 ; /* 0x000003d000007802 */
/* 0x000fe40000000f00 */
/*03c0*/ CALL.REL.NOINC 0x440 ; /* 0x0000007000007944 */
/* 0x001fea0003c00000 */
/*03d0*/ IMAD.MOV.U32 R4, RZ, RZ, R12 ; /* 0x000000ffff047224 */
/* 0x000fe400078e000c */
/*03e0*/ IMAD.MOV.U32 R5, RZ, RZ, R13 ; /* 0x000000ffff057224 */
/* 0x000fe400078e000d */
/*03f0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0400*/ STG.E.64 [R2.64], R4 ; /* 0x0000000402007986 */
/* 0x000fe2000c101b04 */
/*0410*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0420*/ STG.E.64 [R2.64], RZ ; /* 0x000000ff02007986 */
/* 0x000fe2000c101b04 */
/*0430*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0440*/ FSETP.GEU.AND P0, PT, |R9|.reuse, 1.469367938527859385e-39, PT ; /* 0x001000000900780b */
/* 0x040fe20003f0e200 */
/*0450*/ IMAD.MOV.U32 R7, RZ, RZ, R17 ; /* 0x000000ffff077224 */
/* 0x000fe200078e0011 */
/*0460*/ LOP3.LUT R4, R9.reuse, 0x800fffff, RZ, 0xc0, !PT ; /* 0x800fffff09047812 */
/* 0x040fe200078ec0ff */
/*0470*/ IMAD.MOV.U32 R13, RZ, RZ, 0x1ca00000 ; /* 0x1ca00000ff0d7424 */
/* 0x000fe200078e00ff */
/*0480*/ LOP3.LUT R15, R9, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff00000090f7812 */
/* 0x000fe200078ec0ff */
/*0490*/ IMAD.MOV.U32 R6, RZ, RZ, R16 ; /* 0x000000ffff067224 */
/* 0x000fe200078e0010 */
/*04a0*/ LOP3.LUT R5, R4, 0x3ff00000, RZ, 0xfc, !PT ; /* 0x3ff0000004057812 */
/* 0x000fe200078efcff */
/*04b0*/ IMAD.MOV.U32 R4, RZ, RZ, R8 ; /* 0x000000ffff047224 */
/* 0x000fe200078e0008 */
/*04c0*/ FSETP.GEU.AND P2, PT, |R7|.reuse, 1.469367938527859385e-39, PT ; /* 0x001000000700780b */
/* 0x040fe20003f4e200 */
/*04d0*/ IMAD.MOV.U32 R16, RZ, RZ, 0x1 ; /* 0x00000001ff107424 */
/* 0x000fe200078e00ff */
/*04e0*/ LOP3.LUT R12, R7, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff00000070c7812 */
/* 0x000fe200078ec0ff */
/*04f0*/ IMAD.MOV.U32 R10, RZ, RZ, R6 ; /* 0x000000ffff0a7224 */
/* 0x000fe200078e0006 */
/*0500*/ BSSY B1, 0xa00 ; /* 0x000004f000017945 */
/* 0x000fe20003800000 */
/*0510*/ @!P0 DMUL R4, R8, 8.98846567431157953865e+307 ; /* 0x7fe0000008048828 */
/* 0x000e220000000000 */
/*0520*/ ISETP.GE.U32.AND P1, PT, R12, R15, PT ; /* 0x0000000f0c00720c */
/* 0x000fca0003f26070 */
/*0530*/ MUFU.RCP64H R17, R5 ; /* 0x0000000500117308 */
/* 0x001e240000001800 */
/*0540*/ @!P2 LOP3.LUT R11, R9, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff00000090ba812 */
/* 0x000fe200078ec0ff */
/*0550*/ @!P2 IMAD.MOV.U32 R20, RZ, RZ, RZ ; /* 0x000000ffff14a224 */
/* 0x000fc600078e00ff */
/*0560*/ @!P2 ISETP.GE.U32.AND P3, PT, R12, R11, PT ; /* 0x0000000b0c00a20c */
/* 0x000fe40003f66070 */
/*0570*/ SEL R11, R13.reuse, 0x63400000, !P1 ; /* 0x634000000d0b7807 */
/* 0x040fe40004800000 */
/*0580*/ @!P2 SEL R21, R13, 0x63400000, !P3 ; /* 0x634000000d15a807 */
/* 0x000fe40005800000 */
/*0590*/ LOP3.LUT R11, R11, 0x800fffff, R7.reuse, 0xf8, !PT ; /* 0x800fffff0b0b7812 */
/* 0x100fe400078ef807 */
/*05a0*/ @!P2 LOP3.LUT R21, R21, 0x80000000, R7, 0xf8, !PT ; /* 0x800000001515a812 */
/* 0x000fe200078ef807 */
/*05b0*/ DFMA R18, R16, -R4, 1 ; /* 0x3ff000001012742b */
/* 0x001e060000000804 */
/*05c0*/ @!P2 LOP3.LUT R21, R21, 0x100000, RZ, 0xfc, !PT ; /* 0x001000001515a812 */
/* 0x000fc600078efcff */
/*05d0*/ DFMA R18, R18, R18, R18 ; /* 0x000000121212722b */
/* 0x001e080000000012 */
/*05e0*/ @!P2 DFMA R10, R10, 2, -R20 ; /* 0x400000000a0aa82b */
/* 0x000fc80000000814 */
/*05f0*/ DFMA R18, R16, R18, R16 ; /* 0x000000121012722b */
/* 0x0010640000000010 */
/*0600*/ IMAD.MOV.U32 R16, RZ, RZ, R12 ; /* 0x000000ffff107224 */
/* 0x001fe400078e000c */
/*0610*/ IMAD.MOV.U32 R17, RZ, RZ, R15 ; /* 0x000000ffff117224 */
/* 0x000fe200078e000f */
/*0620*/ @!P0 LOP3.LUT R17, R5, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff0000005118812 */
/* 0x000fe200078ec0ff */
/*0630*/ DFMA R20, R18, -R4, 1 ; /* 0x3ff000001214742b */
/* 0x002e220000000804 */
/*0640*/ @!P2 LOP3.LUT R16, R11, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff000000b10a812 */
/* 0x000fe400078ec0ff */
/*0650*/ IADD3 R22, R17, -0x1, RZ ; /* 0xffffffff11167810 */
/* 0x000fc60007ffe0ff */
/*0660*/ DFMA R18, R18, R20, R18 ; /* 0x000000141212722b */
/* 0x0010640000000012 */
/*0670*/ IADD3 R20, R16, -0x1, RZ ; /* 0xffffffff10147810 */
/* 0x001fc80007ffe0ff */
/*0680*/ ISETP.GT.U32.AND P0, PT, R20, 0x7feffffe, PT ; /* 0x7feffffe1400780c */
/* 0x000fe20003f04070 */
/*0690*/ DMUL R14, R18, R10 ; /* 0x0000000a120e7228 */
/* 0x002e060000000000 */
/*06a0*/ ISETP.GT.U32.OR P0, PT, R22, 0x7feffffe, P0 ; /* 0x7feffffe1600780c */
/* 0x000fc60000704470 */
/*06b0*/ DFMA R20, R14, -R4, R10 ; /* 0x800000040e14722b */
/* 0x001e0c000000000a */
/*06c0*/ DFMA R14, R18, R20, R14 ; /* 0x00000014120e722b */
/* 0x001048000000000e */
/*06d0*/ @P0 BRA 0x8a0 ; /* 0x000001c000000947 */
/* 0x000fea0003800000 */
/*06e0*/ LOP3.LUT R7, R9, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff0000009077812 */
/* 0x003fc800078ec0ff */
/*06f0*/ ISETP.GE.U32.AND P0, PT, R12.reuse, R7, PT ; /* 0x000000070c00720c */
/* 0x040fe20003f06070 */
/*0700*/ IMAD.IADD R6, R12, 0x1, -R7 ; /* 0x000000010c067824 */
/* 0x000fc600078e0a07 */
/*0710*/ SEL R13, R13, 0x63400000, !P0 ; /* 0x634000000d0d7807 */
/* 0x000fe40004000000 */
/*0720*/ IMNMX R6, R6, -0x46a00000, !PT ; /* 0xb960000006067817 */
/* 0x000fc80007800200 */
/*0730*/ IMNMX R6, R6, 0x46a00000, PT ; /* 0x46a0000006067817 */
/* 0x000fca0003800200 */
/*0740*/ IMAD.IADD R16, R6, 0x1, -R13 ; /* 0x0000000106107824 */
/* 0x000fe400078e0a0d */
/*0750*/ IMAD.MOV.U32 R6, RZ, RZ, RZ ; /* 0x000000ffff067224 */
/* 0x000fc600078e00ff */
/*0760*/ IADD3 R7, R16, 0x7fe00000, RZ ; /* 0x7fe0000010077810 */
/* 0x000fcc0007ffe0ff */
/*0770*/ DMUL R12, R14, R6 ; /* 0x000000060e0c7228 */
/* 0x000e140000000000 */
/*0780*/ FSETP.GTU.AND P0, PT, |R13|, 1.469367938527859385e-39, PT ; /* 0x001000000d00780b */
/* 0x001fda0003f0c200 */
/*0790*/ @P0 BRA 0x9f0 ; /* 0x0000025000000947 */
/* 0x000fea0003800000 */
/*07a0*/ DFMA R4, R14, -R4, R10 ; /* 0x800000040e04722b */
/* 0x000e22000000000a */
/*07b0*/ IMAD.MOV.U32 R6, RZ, RZ, RZ ; /* 0x000000ffff067224 */
/* 0x000fd200078e00ff */
/*07c0*/ FSETP.NEU.AND P0, PT, R5.reuse, RZ, PT ; /* 0x000000ff0500720b */
/* 0x041fe40003f0d000 */
/*07d0*/ LOP3.LUT R9, R5, 0x80000000, R9, 0x48, !PT ; /* 0x8000000005097812 */
/* 0x000fc800078e4809 */
/*07e0*/ LOP3.LUT R7, R9, R7, RZ, 0xfc, !PT ; /* 0x0000000709077212 */
/* 0x000fce00078efcff */
/*07f0*/ @!P0 BRA 0x9f0 ; /* 0x000001f000008947 */
/* 0x000fea0003800000 */
/*0800*/ IMAD.MOV R5, RZ, RZ, -R16 ; /* 0x000000ffff057224 */
/* 0x000fe200078e0a10 */
/*0810*/ DMUL.RP R6, R14, R6 ; /* 0x000000060e067228 */
/* 0x000e220000008000 */
/*0820*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */
/* 0x000fcc00078e00ff */
/*0830*/ DFMA R4, R12, -R4, R14 ; /* 0x800000040c04722b */
/* 0x000e46000000000e */
/*0840*/ LOP3.LUT R9, R7, R9, RZ, 0x3c, !PT ; /* 0x0000000907097212 */
/* 0x001fc600078e3cff */
/*0850*/ IADD3 R4, -R16, -0x43300000, RZ ; /* 0xbcd0000010047810 */
/* 0x002fc80007ffe1ff */
/*0860*/ FSETP.NEU.AND P0, PT, |R5|, R4, PT ; /* 0x000000040500720b */
/* 0x000fc80003f0d200 */
/*0870*/ FSEL R12, R6, R12, !P0 ; /* 0x0000000c060c7208 */
/* 0x000fe40004000000 */
/*0880*/ FSEL R13, R9, R13, !P0 ; /* 0x0000000d090d7208 */
/* 0x000fe20004000000 */
/*0890*/ BRA 0x9f0 ; /* 0x0000015000007947 */
/* 0x000fea0003800000 */
/*08a0*/ DSETP.NAN.AND P0, PT, R6, R6, PT ; /* 0x000000060600722a */
/* 0x003e1c0003f08000 */
/*08b0*/ @P0 BRA 0x9d0 ; /* 0x0000011000000947 */
/* 0x001fea0003800000 */
/*08c0*/ DSETP.NAN.AND P0, PT, R8, R8, PT ; /* 0x000000080800722a */
/* 0x000e1c0003f08000 */
/*08d0*/ @P0 BRA 0x9a0 ; /* 0x000000c000000947 */
/* 0x001fea0003800000 */
/*08e0*/ ISETP.NE.AND P0, PT, R16, R17, PT ; /* 0x000000111000720c */
/* 0x000fe20003f05270 */
/*08f0*/ IMAD.MOV.U32 R12, RZ, RZ, 0x0 ; /* 0x00000000ff0c7424 */
/* 0x000fe400078e00ff */
/*0900*/ IMAD.MOV.U32 R13, RZ, RZ, -0x80000 ; /* 0xfff80000ff0d7424 */
/* 0x000fd400078e00ff */
/*0910*/ @!P0 BRA 0x9f0 ; /* 0x000000d000008947 */
/* 0x000fea0003800000 */
/*0920*/ ISETP.NE.AND P0, PT, R16, 0x7ff00000, PT ; /* 0x7ff000001000780c */
/* 0x000fe40003f05270 */
/*0930*/ LOP3.LUT R13, R7, 0x80000000, R9, 0x48, !PT ; /* 0x80000000070d7812 */
/* 0x000fe400078e4809 */
/*0940*/ ISETP.EQ.OR P0, PT, R17, RZ, !P0 ; /* 0x000000ff1100720c */
/* 0x000fda0004702670 */
/*0950*/ @P0 LOP3.LUT R4, R13, 0x7ff00000, RZ, 0xfc, !PT ; /* 0x7ff000000d040812 */
/* 0x000fe200078efcff */
/*0960*/ @!P0 IMAD.MOV.U32 R12, RZ, RZ, RZ ; /* 0x000000ffff0c8224 */
/* 0x000fe400078e00ff */
/*0970*/ @P0 IMAD.MOV.U32 R12, RZ, RZ, RZ ; /* 0x000000ffff0c0224 */
/* 0x000fe400078e00ff */
/*0980*/ @P0 IMAD.MOV.U32 R13, RZ, RZ, R4 ; /* 0x000000ffff0d0224 */
/* 0x000fe200078e0004 */
/*0990*/ BRA 0x9f0 ; /* 0x0000005000007947 */
/* 0x000fea0003800000 */
/*09a0*/ LOP3.LUT R13, R9, 0x80000, RZ, 0xfc, !PT ; /* 0x00080000090d7812 */
/* 0x000fe200078efcff */
/*09b0*/ IMAD.MOV.U32 R12, RZ, RZ, R8 ; /* 0x000000ffff0c7224 */
/* 0x000fe200078e0008 */
/*09c0*/ BRA 0x9f0 ; /* 0x0000002000007947 */
/* 0x000fea0003800000 */
/*09d0*/ LOP3.LUT R13, R7, 0x80000, RZ, 0xfc, !PT ; /* 0x00080000070d7812 */
/* 0x000fe200078efcff */
/*09e0*/ IMAD.MOV.U32 R12, RZ, RZ, R6 ; /* 0x000000ffff0c7224 */
/* 0x000fe400078e0006 */
/*09f0*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0a00*/ IMAD.MOV.U32 R4, RZ, RZ, R0 ; /* 0x000000ffff047224 */
/* 0x000fe400078e0000 */
/*0a10*/ IMAD.MOV.U32 R5, RZ, RZ, 0x0 ; /* 0x00000000ff057424 */
/* 0x000fc800078e00ff */
/*0a20*/ RET.REL.NODEC R4 0x0 ; /* 0xfffff5d004007950 */
/* 0x000fea0003c3ffff */
/*0a30*/ BRA 0xa30; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0a40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0aa0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ab0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ac0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ad0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ae0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0af0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void StarRadKernel (double *Qbase2, double *Vrad, double *QStar, double dt, int nrad, int nsec, double *invdiffRmed, double *Rmed, double *dq)
{
int j = threadIdx.x + blockDim.x*blockIdx.x;
int i = threadIdx.y + blockDim.y*blockIdx.y;
double dqm, dqp;
if (i<nrad && j<nsec){
if ((i == 0 || i == nrad-1)) dq[i + j*nrad] = 0.0;
else {
dqm = (Qbase2[i*nsec + j] - Qbase2[(i-1)*nsec + j])*invdiffRmed[i];
dqp = (Qbase2[(i+1)*nsec + j] - Qbase2[i*nsec + j])*invdiffRmed[i+1];
if (dqp * dqm > 0.0)
dq[i+j*nrad] = 2.0*dqp*dqm/(dqp+dqm);
else
dq[i+j*nrad] = 0.0;
}
}
} | .file "tmpxft_00035ec0_00000000-6_StarRadKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z46__device_stub__Z13StarRadKernelPdS_S_diiS_S_S_PdS_S_diiS_S_S_
.type _Z46__device_stub__Z13StarRadKernelPdS_S_diiS_S_S_PdS_S_diiS_S_S_, @function
_Z46__device_stub__Z13StarRadKernelPdS_S_diiS_S_S_PdS_S_diiS_S_S_:
.LFB2051:
.cfi_startproc
endbr64
subq $216, %rsp
.cfi_def_cfa_offset 224
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
movq %rdx, 40(%rsp)
movsd %xmm0, 32(%rsp)
movl %ecx, 28(%rsp)
movl %r8d, 24(%rsp)
movq %r9, 16(%rsp)
movq 224(%rsp), %rax
movq %rax, 8(%rsp)
movq 232(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 200(%rsp)
xorl %eax, %eax
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rax
movq %rax, 144(%rsp)
leaq 32(%rsp), %rax
movq %rax, 152(%rsp)
leaq 28(%rsp), %rax
movq %rax, 160(%rsp)
leaq 24(%rsp), %rax
movq %rax, 168(%rsp)
leaq 16(%rsp), %rax
movq %rax, 176(%rsp)
leaq 8(%rsp), %rax
movq %rax, 184(%rsp)
movq %rsp, %rax
movq %rax, 192(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 200(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $216, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 232
pushq 72(%rsp)
.cfi_def_cfa_offset 240
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z13StarRadKernelPdS_S_diiS_S_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 224
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z46__device_stub__Z13StarRadKernelPdS_S_diiS_S_S_PdS_S_diiS_S_S_, .-_Z46__device_stub__Z13StarRadKernelPdS_S_diiS_S_S_PdS_S_diiS_S_S_
.globl _Z13StarRadKernelPdS_S_diiS_S_S_
.type _Z13StarRadKernelPdS_S_diiS_S_S_, @function
_Z13StarRadKernelPdS_S_diiS_S_S_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pushq 24(%rsp)
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z46__device_stub__Z13StarRadKernelPdS_S_diiS_S_S_PdS_S_diiS_S_S_
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z13StarRadKernelPdS_S_diiS_S_S_, .-_Z13StarRadKernelPdS_S_diiS_S_S_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z13StarRadKernelPdS_S_diiS_S_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z13StarRadKernelPdS_S_diiS_S_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void StarRadKernel (double *Qbase2, double *Vrad, double *QStar, double dt, int nrad, int nsec, double *invdiffRmed, double *Rmed, double *dq)
{
int j = threadIdx.x + blockDim.x*blockIdx.x;
int i = threadIdx.y + blockDim.y*blockIdx.y;
double dqm, dqp;
if (i<nrad && j<nsec){
if ((i == 0 || i == nrad-1)) dq[i + j*nrad] = 0.0;
else {
dqm = (Qbase2[i*nsec + j] - Qbase2[(i-1)*nsec + j])*invdiffRmed[i];
dqp = (Qbase2[(i+1)*nsec + j] - Qbase2[i*nsec + j])*invdiffRmed[i+1];
if (dqp * dqm > 0.0)
dq[i+j*nrad] = 2.0*dqp*dqm/(dqp+dqm);
else
dq[i+j*nrad] = 0.0;
}
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void StarRadKernel (double *Qbase2, double *Vrad, double *QStar, double dt, int nrad, int nsec, double *invdiffRmed, double *Rmed, double *dq)
{
int j = threadIdx.x + blockDim.x*blockIdx.x;
int i = threadIdx.y + blockDim.y*blockIdx.y;
double dqm, dqp;
if (i<nrad && j<nsec){
if ((i == 0 || i == nrad-1)) dq[i + j*nrad] = 0.0;
else {
dqm = (Qbase2[i*nsec + j] - Qbase2[(i-1)*nsec + j])*invdiffRmed[i];
dqp = (Qbase2[(i+1)*nsec + j] - Qbase2[i*nsec + j])*invdiffRmed[i+1];
if (dqp * dqm > 0.0)
dq[i+j*nrad] = 2.0*dqp*dqm/(dqp+dqm);
else
dq[i+j*nrad] = 0.0;
}
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void StarRadKernel (double *Qbase2, double *Vrad, double *QStar, double dt, int nrad, int nsec, double *invdiffRmed, double *Rmed, double *dq)
{
int j = threadIdx.x + blockDim.x*blockIdx.x;
int i = threadIdx.y + blockDim.y*blockIdx.y;
double dqm, dqp;
if (i<nrad && j<nsec){
if ((i == 0 || i == nrad-1)) dq[i + j*nrad] = 0.0;
else {
dqm = (Qbase2[i*nsec + j] - Qbase2[(i-1)*nsec + j])*invdiffRmed[i];
dqp = (Qbase2[(i+1)*nsec + j] - Qbase2[i*nsec + j])*invdiffRmed[i+1];
if (dqp * dqm > 0.0)
dq[i+j*nrad] = 2.0*dqp*dqm/(dqp+dqm);
else
dq[i+j*nrad] = 0.0;
}
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13StarRadKernelPdS_S_diiS_S_S_
.globl _Z13StarRadKernelPdS_S_diiS_S_S_
.p2align 8
.type _Z13StarRadKernelPdS_S_diiS_S_S_,@function
_Z13StarRadKernelPdS_S_diiS_S_S_:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x4c
s_load_b64 s[4:5], s[0:1], 0x20
v_bfe_u32 v2, v0, 10, 10
v_and_b32_e32 v4, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s2, 0xffff
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[0:1], null, s15, s2, v[2:3]
v_mad_u64_u32 v[2:3], null, s14, s3, v[4:5]
v_cmp_gt_i32_e32 vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_i32_e64 s2, s5, v2
s_and_b32 s2, s2, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_6
s_add_i32 s2, s4, -1
v_cmp_ne_u32_e32 vcc_lo, 0, v0
v_cmp_ne_u32_e64 s2, s2, v0
v_mov_b32_e32 v3, 0
v_mov_b32_e32 v4, 0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s3, vcc_lo, s2
s_and_saveexec_b32 s2, s3
s_cbranch_execz .LBB0_5
v_add_nc_u32_e32 v3, 1, v0
v_add_nc_u32_e32 v1, -1, v0
s_clause 0x1
s_load_b64 s[6:7], s[0:1], 0x0
s_load_b64 s[8:9], s[0:1], 0x28
v_mad_u64_u32 v[4:5], null, v0, s5, v[2:3]
v_mad_u64_u32 v[6:7], null, v1, s5, v[2:3]
v_mad_u64_u32 v[8:9], null, v3, s5, v[2:3]
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_ashrrev_i32_e32 v5, 31, v4
v_ashrrev_i32_e32 v7, 31, v6
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
v_lshlrev_b64 v[12:13], 3, v[0:1]
v_ashrrev_i32_e32 v9, 31, v8
v_lshlrev_b64 v[4:5], 3, v[4:5]
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
v_lshlrev_b64 v[6:7], 3, v[6:7]
v_lshlrev_b64 v[8:9], 3, v[8:9]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v4, vcc_lo, s6, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_4)
v_add_co_u32 v6, vcc_lo, s6, v6
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v7, vcc_lo
v_add_co_u32 v8, vcc_lo, s6, v8
v_add_co_ci_u32_e32 v9, vcc_lo, s7, v9, vcc_lo
s_clause 0x2
global_load_b64 v[10:11], v[4:5], off
global_load_b64 v[5:6], v[6:7], off
global_load_b64 v[7:8], v[8:9], off
v_ashrrev_i32_e32 v4, 31, v3
v_add_co_u32 v12, vcc_lo, s8, v12
v_add_co_ci_u32_e32 v13, vcc_lo, s9, v13, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 3, v[3:4]
v_add_co_u32 v3, vcc_lo, s8, v3
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s9, v4, vcc_lo
s_clause 0x1
global_load_b64 v[12:13], v[12:13], off
global_load_b64 v[3:4], v[3:4], off
s_waitcnt vmcnt(3)
v_add_f64 v[5:6], v[10:11], -v[5:6]
s_waitcnt vmcnt(2)
v_add_f64 v[7:8], v[7:8], -v[10:11]
s_waitcnt vmcnt(1)
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mul_f64 v[5:6], v[5:6], v[12:13]
s_waitcnt vmcnt(0)
v_mul_f64 v[7:8], v[7:8], v[3:4]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[3:4], v[5:6], v[7:8]
v_cmp_lt_f64_e32 vcc_lo, 0, v[3:4]
v_mov_b32_e32 v3, 0
v_mov_b32_e32 v4, 0
s_and_saveexec_b32 s3, vcc_lo
s_cbranch_execz .LBB0_4
v_add_f64 v[3:4], v[7:8], v[7:8]
v_add_f64 v[7:8], v[5:6], v[7:8]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[3:4], v[5:6], v[3:4]
v_div_scale_f64 v[5:6], null, v[7:8], v[7:8], v[3:4]
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_f64_e32 v[9:10], v[5:6]
s_waitcnt_depctr 0xfff
v_fma_f64 v[11:12], -v[5:6], v[9:10], 1.0
v_fma_f64 v[9:10], v[9:10], v[11:12], v[9:10]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[11:12], -v[5:6], v[9:10], 1.0
v_fma_f64 v[9:10], v[9:10], v[11:12], v[9:10]
v_div_scale_f64 v[11:12], vcc_lo, v[3:4], v[7:8], v[3:4]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[13:14], v[11:12], v[9:10]
v_fma_f64 v[5:6], -v[5:6], v[13:14], v[11:12]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fmas_f64 v[5:6], v[5:6], v[9:10], v[13:14]
v_div_fixup_f64 v[3:4], v[5:6], v[7:8], v[3:4]
.LBB0_4:
s_or_b32 exec_lo, exec_lo, s3
.LBB0_5:
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
s_or_b32 exec_lo, exec_lo, s2
s_load_b64 s[0:1], s[0:1], 0x38
v_mad_u64_u32 v[5:6], null, v2, s4, v[0:1]
v_ashrrev_i32_e32 v6, 31, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 3, v[5:6]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b64 v[0:1], v[3:4], off
.LBB0_6:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13StarRadKernelPdS_S_diiS_S_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 320
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 15
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13StarRadKernelPdS_S_diiS_S_S_, .Lfunc_end0-_Z13StarRadKernelPdS_S_diiS_S_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 8
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 48
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 56
.size: 8
.value_kind: global_buffer
- .offset: 64
.size: 4
.value_kind: hidden_block_count_x
- .offset: 68
.size: 4
.value_kind: hidden_block_count_y
- .offset: 72
.size: 4
.value_kind: hidden_block_count_z
- .offset: 76
.size: 2
.value_kind: hidden_group_size_x
- .offset: 78
.size: 2
.value_kind: hidden_group_size_y
- .offset: 80
.size: 2
.value_kind: hidden_group_size_z
- .offset: 82
.size: 2
.value_kind: hidden_remainder_x
- .offset: 84
.size: 2
.value_kind: hidden_remainder_y
- .offset: 86
.size: 2
.value_kind: hidden_remainder_z
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 112
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 120
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 128
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 320
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13StarRadKernelPdS_S_diiS_S_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13StarRadKernelPdS_S_diiS_S_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 15
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void StarRadKernel (double *Qbase2, double *Vrad, double *QStar, double dt, int nrad, int nsec, double *invdiffRmed, double *Rmed, double *dq)
{
int j = threadIdx.x + blockDim.x*blockIdx.x;
int i = threadIdx.y + blockDim.y*blockIdx.y;
double dqm, dqp;
if (i<nrad && j<nsec){
if ((i == 0 || i == nrad-1)) dq[i + j*nrad] = 0.0;
else {
dqm = (Qbase2[i*nsec + j] - Qbase2[(i-1)*nsec + j])*invdiffRmed[i];
dqp = (Qbase2[(i+1)*nsec + j] - Qbase2[i*nsec + j])*invdiffRmed[i+1];
if (dqp * dqm > 0.0)
dq[i+j*nrad] = 2.0*dqp*dqm/(dqp+dqm);
else
dq[i+j*nrad] = 0.0;
}
}
} | .text
.file "StarRadKernel.hip"
.globl _Z28__device_stub__StarRadKernelPdS_S_diiS_S_S_ # -- Begin function _Z28__device_stub__StarRadKernelPdS_S_diiS_S_S_
.p2align 4, 0x90
.type _Z28__device_stub__StarRadKernelPdS_S_diiS_S_S_,@function
_Z28__device_stub__StarRadKernelPdS_S_diiS_S_S_: # @_Z28__device_stub__StarRadKernelPdS_S_diiS_S_S_
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movsd %xmm0, 64(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
movq %r9, 56(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movq %rsp, %rax
movq %rax, 136(%rsp)
leaq 56(%rsp), %rax
movq %rax, 144(%rsp)
leaq 176(%rsp), %rax
movq %rax, 152(%rsp)
leaq 184(%rsp), %rax
movq %rax, 160(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z13StarRadKernelPdS_S_diiS_S_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end0:
.size _Z28__device_stub__StarRadKernelPdS_S_diiS_S_S_, .Lfunc_end0-_Z28__device_stub__StarRadKernelPdS_S_diiS_S_S_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13StarRadKernelPdS_S_diiS_S_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z13StarRadKernelPdS_S_diiS_S_S_,@object # @_Z13StarRadKernelPdS_S_diiS_S_S_
.section .rodata,"a",@progbits
.globl _Z13StarRadKernelPdS_S_diiS_S_S_
.p2align 3, 0x0
_Z13StarRadKernelPdS_S_diiS_S_S_:
.quad _Z28__device_stub__StarRadKernelPdS_S_diiS_S_S_
.size _Z13StarRadKernelPdS_S_diiS_S_S_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z13StarRadKernelPdS_S_diiS_S_S_"
.size .L__unnamed_1, 33
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__StarRadKernelPdS_S_diiS_S_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13StarRadKernelPdS_S_diiS_S_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z13StarRadKernelPdS_S_diiS_S_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e280000002100 */
/*0030*/ S2R R11, SR_CTAID.Y ; /* 0x00000000000b7919 */
/* 0x000e680000002600 */
/*0040*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e620000002200 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x184], PT ; /* 0x0000610000007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R11, R11, c[0x0][0x4], R2 ; /* 0x000001000b0b7a24 */
/* 0x002fca00078e0202 */
/*0080*/ ISETP.GE.OR P0, PT, R11, c[0x0][0x180], P0 ; /* 0x000060000b007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ ULDC UR4, c[0x0][0x180] ; /* 0x0000600000047ab9 */
/* 0x000fe20000000800 */
/*00b0*/ IMAD.MOV.U32 R9, RZ, RZ, 0x8 ; /* 0x00000008ff097424 */
/* 0x000fe200078e00ff */
/*00c0*/ UIADD3 UR4, UR4, -0x1, URZ ; /* 0xffffffff04047890 */
/* 0x000fe2000fffe03f */
/*00d0*/ IMAD R2, R0, c[0x0][0x180], R11 ; /* 0x0000600000027a24 */
/* 0x000fc800078e020b */
/*00e0*/ IMAD.WIDE R2, R2, R9, c[0x0][0x198] ; /* 0x0000660002027625 */
/* 0x000fe200078e0209 */
/*00f0*/ ISETP.NE.AND P0, PT, R11, UR4, PT ; /* 0x000000040b007c0c */
/* 0x000fe2000bf05270 */
/*0100*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0110*/ ISETP.EQ.OR P0, PT, R11, RZ, !P0 ; /* 0x000000ff0b00720c */
/* 0x000fda0004702670 */
/*0120*/ @P0 BRA 0x420 ; /* 0x000002f000000947 */
/* 0x000fea0003800000 */
/*0130*/ IMAD.MOV.U32 R14, RZ, RZ, c[0x0][0x184] ; /* 0x00006100ff0e7624 */
/* 0x000fe400078e00ff */
/*0140*/ IMAD R4, R11.reuse, c[0x0][0x184], R0 ; /* 0x000061000b047a24 */
/* 0x040fe400078e0200 */
/*0150*/ IMAD R5, R11.reuse, R14, -c[0x0][0x184] ; /* 0x800061000b057624 */
/* 0x040fe400078e020e */
/*0160*/ IMAD.WIDE R10, R11, R9, c[0x0][0x188] ; /* 0x000062000b0a7625 */
/* 0x000fc800078e0209 */
/*0170*/ IMAD.IADD R6, R0, 0x1, R5 ; /* 0x0000000100067824 */
/* 0x000fe200078e0205 */
/*0180*/ LDG.E.64 R12, [R10.64] ; /* 0x000000040a0c7981 */
/* 0x000ea2000c1e1b00 */
/*0190*/ IMAD.WIDE R4, R4, R9, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fc600078e0209 */
/*01a0*/ LDG.E.64 R18, [R10.64+0x8] ; /* 0x000008040a127981 */
/* 0x000ee2000c1e1b00 */
/*01b0*/ IMAD R14, R14, 0x2, R6 ; /* 0x000000020e0e7824 */
/* 0x000fe400078e0206 */
/*01c0*/ IMAD.WIDE R6, R6, R9.reuse, c[0x0][0x160] ; /* 0x0000580006067625 */
/* 0x080fe200078e0209 */
/*01d0*/ LDG.E.64 R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000f26000c1e1b00 */
/*01e0*/ IMAD.WIDE R14, R14, R9, c[0x0][0x160] ; /* 0x000058000e0e7625 */
/* 0x000fe400078e0209 */
/*01f0*/ LDG.E.64 R6, [R6.64] ; /* 0x0000000406067981 */
/* 0x000f28000c1e1b00 */
/*0200*/ LDG.E.64 R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000f62000c1e1b00 */
/*0210*/ DADD R8, -R6, R4 ; /* 0x0000000006087229 */
/* 0x010e880000000104 */
/*0220*/ DADD R16, -R4, R14 ; /* 0x0000000004107229 */
/* 0x020ec8000000010e */
/*0230*/ DMUL R12, R8, R12 ; /* 0x0000000c080c7228 */
/* 0x004fc80000000000 */
/*0240*/ DMUL R16, R16, R18 ; /* 0x0000001210107228 */
/* 0x008e0c0000000000 */
/*0250*/ DMUL R8, R12, R16 ; /* 0x000000100c087228 */
/* 0x001e0c0000000000 */
/*0260*/ DSETP.GT.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800722a */
/* 0x001e1c0003f04000 */
/*0270*/ @!P0 STG.E.64 [R2.64], RZ ; /* 0x000000ff02008986 */
/* 0x0011e2000c101b04 */
/*0280*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0290*/ DADD R8, R12, R16.reuse ; /* 0x000000000c087229 */
/* 0x100e620000000010 */
/*02a0*/ IMAD.MOV.U32 R4, RZ, RZ, 0x1 ; /* 0x00000001ff047424 */
/* 0x000fe200078e00ff */
/*02b0*/ BSSY B0, 0x400 ; /* 0x0000014000007945 */
/* 0x000fe40003800000 */
/*02c0*/ DADD R16, R16, R16 ; /* 0x0000000010107229 */
/* 0x000ea40000000010 */
/*02d0*/ MUFU.RCP64H R5, R9 ; /* 0x0000000900057308 */
/* 0x002e680000001800 */
/*02e0*/ DMUL R16, R12, R16 ; /* 0x000000100c107228 */
/* 0x004e940000000000 */
/*02f0*/ FSETP.GEU.AND P1, PT, |R17|, 6.5827683646048100446e-37, PT ; /* 0x036000001100780b */
/* 0x004fe20003f2e200 */
/*0300*/ DFMA R6, -R8, R4, 1 ; /* 0x3ff000000806742b */
/* 0x002e4c0000000104 */
/*0310*/ DFMA R6, R6, R6, R6 ; /* 0x000000060606722b */
/* 0x002e4c0000000006 */
/*0320*/ DFMA R6, R4, R6, R4 ; /* 0x000000060406722b */
/* 0x002e4c0000000004 */
/*0330*/ DFMA R4, -R8, R6, 1 ; /* 0x3ff000000804742b */
/* 0x002e4c0000000106 */
/*0340*/ DFMA R4, R6, R4, R6 ; /* 0x000000040604722b */
/* 0x002e4c0000000006 */
/*0350*/ DMUL R6, R16, R4 ; /* 0x0000000410067228 */
/* 0x002e4c0000000000 */
/*0360*/ DFMA R10, -R8, R6, R16 ; /* 0x00000006080a722b */
/* 0x002e4c0000000110 */
/*0370*/ DFMA R4, R4, R10, R6 ; /* 0x0000000a0404722b */
/* 0x002e540000000006 */
/*0380*/ FFMA R0, RZ, R9, R5 ; /* 0x00000009ff007223 */
/* 0x002fca0000000005 */
/*0390*/ FSETP.GT.AND P0, PT, |R0|, 1.469367938527859385e-39, PT ; /* 0x001000000000780b */
/* 0x000fda0003f04200 */
/*03a0*/ @P0 BRA P1, 0x3f0 ; /* 0x0000004000000947 */
/* 0x000fea0000800000 */
/*03b0*/ MOV R0, 0x3d0 ; /* 0x000003d000007802 */
/* 0x000fe40000000f00 */
/*03c0*/ CALL.REL.NOINC 0x440 ; /* 0x0000007000007944 */
/* 0x001fea0003c00000 */
/*03d0*/ IMAD.MOV.U32 R4, RZ, RZ, R12 ; /* 0x000000ffff047224 */
/* 0x000fe400078e000c */
/*03e0*/ IMAD.MOV.U32 R5, RZ, RZ, R13 ; /* 0x000000ffff057224 */
/* 0x000fe400078e000d */
/*03f0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0400*/ STG.E.64 [R2.64], R4 ; /* 0x0000000402007986 */
/* 0x000fe2000c101b04 */
/*0410*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0420*/ STG.E.64 [R2.64], RZ ; /* 0x000000ff02007986 */
/* 0x000fe2000c101b04 */
/*0430*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0440*/ FSETP.GEU.AND P0, PT, |R9|.reuse, 1.469367938527859385e-39, PT ; /* 0x001000000900780b */
/* 0x040fe20003f0e200 */
/*0450*/ IMAD.MOV.U32 R7, RZ, RZ, R17 ; /* 0x000000ffff077224 */
/* 0x000fe200078e0011 */
/*0460*/ LOP3.LUT R4, R9.reuse, 0x800fffff, RZ, 0xc0, !PT ; /* 0x800fffff09047812 */
/* 0x040fe200078ec0ff */
/*0470*/ IMAD.MOV.U32 R13, RZ, RZ, 0x1ca00000 ; /* 0x1ca00000ff0d7424 */
/* 0x000fe200078e00ff */
/*0480*/ LOP3.LUT R15, R9, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff00000090f7812 */
/* 0x000fe200078ec0ff */
/*0490*/ IMAD.MOV.U32 R6, RZ, RZ, R16 ; /* 0x000000ffff067224 */
/* 0x000fe200078e0010 */
/*04a0*/ LOP3.LUT R5, R4, 0x3ff00000, RZ, 0xfc, !PT ; /* 0x3ff0000004057812 */
/* 0x000fe200078efcff */
/*04b0*/ IMAD.MOV.U32 R4, RZ, RZ, R8 ; /* 0x000000ffff047224 */
/* 0x000fe200078e0008 */
/*04c0*/ FSETP.GEU.AND P2, PT, |R7|.reuse, 1.469367938527859385e-39, PT ; /* 0x001000000700780b */
/* 0x040fe20003f4e200 */
/*04d0*/ IMAD.MOV.U32 R16, RZ, RZ, 0x1 ; /* 0x00000001ff107424 */
/* 0x000fe200078e00ff */
/*04e0*/ LOP3.LUT R12, R7, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff00000070c7812 */
/* 0x000fe200078ec0ff */
/*04f0*/ IMAD.MOV.U32 R10, RZ, RZ, R6 ; /* 0x000000ffff0a7224 */
/* 0x000fe200078e0006 */
/*0500*/ BSSY B1, 0xa00 ; /* 0x000004f000017945 */
/* 0x000fe20003800000 */
/*0510*/ @!P0 DMUL R4, R8, 8.98846567431157953865e+307 ; /* 0x7fe0000008048828 */
/* 0x000e220000000000 */
/*0520*/ ISETP.GE.U32.AND P1, PT, R12, R15, PT ; /* 0x0000000f0c00720c */
/* 0x000fca0003f26070 */
/*0530*/ MUFU.RCP64H R17, R5 ; /* 0x0000000500117308 */
/* 0x001e240000001800 */
/*0540*/ @!P2 LOP3.LUT R11, R9, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff00000090ba812 */
/* 0x000fe200078ec0ff */
/*0550*/ @!P2 IMAD.MOV.U32 R20, RZ, RZ, RZ ; /* 0x000000ffff14a224 */
/* 0x000fc600078e00ff */
/*0560*/ @!P2 ISETP.GE.U32.AND P3, PT, R12, R11, PT ; /* 0x0000000b0c00a20c */
/* 0x000fe40003f66070 */
/*0570*/ SEL R11, R13.reuse, 0x63400000, !P1 ; /* 0x634000000d0b7807 */
/* 0x040fe40004800000 */
/*0580*/ @!P2 SEL R21, R13, 0x63400000, !P3 ; /* 0x634000000d15a807 */
/* 0x000fe40005800000 */
/*0590*/ LOP3.LUT R11, R11, 0x800fffff, R7.reuse, 0xf8, !PT ; /* 0x800fffff0b0b7812 */
/* 0x100fe400078ef807 */
/*05a0*/ @!P2 LOP3.LUT R21, R21, 0x80000000, R7, 0xf8, !PT ; /* 0x800000001515a812 */
/* 0x000fe200078ef807 */
/*05b0*/ DFMA R18, R16, -R4, 1 ; /* 0x3ff000001012742b */
/* 0x001e060000000804 */
/*05c0*/ @!P2 LOP3.LUT R21, R21, 0x100000, RZ, 0xfc, !PT ; /* 0x001000001515a812 */
/* 0x000fc600078efcff */
/*05d0*/ DFMA R18, R18, R18, R18 ; /* 0x000000121212722b */
/* 0x001e080000000012 */
/*05e0*/ @!P2 DFMA R10, R10, 2, -R20 ; /* 0x400000000a0aa82b */
/* 0x000fc80000000814 */
/*05f0*/ DFMA R18, R16, R18, R16 ; /* 0x000000121012722b */
/* 0x0010640000000010 */
/*0600*/ IMAD.MOV.U32 R16, RZ, RZ, R12 ; /* 0x000000ffff107224 */
/* 0x001fe400078e000c */
/*0610*/ IMAD.MOV.U32 R17, RZ, RZ, R15 ; /* 0x000000ffff117224 */
/* 0x000fe200078e000f */
/*0620*/ @!P0 LOP3.LUT R17, R5, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff0000005118812 */
/* 0x000fe200078ec0ff */
/*0630*/ DFMA R20, R18, -R4, 1 ; /* 0x3ff000001214742b */
/* 0x002e220000000804 */
/*0640*/ @!P2 LOP3.LUT R16, R11, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff000000b10a812 */
/* 0x000fe400078ec0ff */
/*0650*/ IADD3 R22, R17, -0x1, RZ ; /* 0xffffffff11167810 */
/* 0x000fc60007ffe0ff */
/*0660*/ DFMA R18, R18, R20, R18 ; /* 0x000000141212722b */
/* 0x0010640000000012 */
/*0670*/ IADD3 R20, R16, -0x1, RZ ; /* 0xffffffff10147810 */
/* 0x001fc80007ffe0ff */
/*0680*/ ISETP.GT.U32.AND P0, PT, R20, 0x7feffffe, PT ; /* 0x7feffffe1400780c */
/* 0x000fe20003f04070 */
/*0690*/ DMUL R14, R18, R10 ; /* 0x0000000a120e7228 */
/* 0x002e060000000000 */
/*06a0*/ ISETP.GT.U32.OR P0, PT, R22, 0x7feffffe, P0 ; /* 0x7feffffe1600780c */
/* 0x000fc60000704470 */
/*06b0*/ DFMA R20, R14, -R4, R10 ; /* 0x800000040e14722b */
/* 0x001e0c000000000a */
/*06c0*/ DFMA R14, R18, R20, R14 ; /* 0x00000014120e722b */
/* 0x001048000000000e */
/*06d0*/ @P0 BRA 0x8a0 ; /* 0x000001c000000947 */
/* 0x000fea0003800000 */
/*06e0*/ LOP3.LUT R7, R9, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff0000009077812 */
/* 0x003fc800078ec0ff */
/*06f0*/ ISETP.GE.U32.AND P0, PT, R12.reuse, R7, PT ; /* 0x000000070c00720c */
/* 0x040fe20003f06070 */
/*0700*/ IMAD.IADD R6, R12, 0x1, -R7 ; /* 0x000000010c067824 */
/* 0x000fc600078e0a07 */
/*0710*/ SEL R13, R13, 0x63400000, !P0 ; /* 0x634000000d0d7807 */
/* 0x000fe40004000000 */
/*0720*/ IMNMX R6, R6, -0x46a00000, !PT ; /* 0xb960000006067817 */
/* 0x000fc80007800200 */
/*0730*/ IMNMX R6, R6, 0x46a00000, PT ; /* 0x46a0000006067817 */
/* 0x000fca0003800200 */
/*0740*/ IMAD.IADD R16, R6, 0x1, -R13 ; /* 0x0000000106107824 */
/* 0x000fe400078e0a0d */
/*0750*/ IMAD.MOV.U32 R6, RZ, RZ, RZ ; /* 0x000000ffff067224 */
/* 0x000fc600078e00ff */
/*0760*/ IADD3 R7, R16, 0x7fe00000, RZ ; /* 0x7fe0000010077810 */
/* 0x000fcc0007ffe0ff */
/*0770*/ DMUL R12, R14, R6 ; /* 0x000000060e0c7228 */
/* 0x000e140000000000 */
/*0780*/ FSETP.GTU.AND P0, PT, |R13|, 1.469367938527859385e-39, PT ; /* 0x001000000d00780b */
/* 0x001fda0003f0c200 */
/*0790*/ @P0 BRA 0x9f0 ; /* 0x0000025000000947 */
/* 0x000fea0003800000 */
/*07a0*/ DFMA R4, R14, -R4, R10 ; /* 0x800000040e04722b */
/* 0x000e22000000000a */
/*07b0*/ IMAD.MOV.U32 R6, RZ, RZ, RZ ; /* 0x000000ffff067224 */
/* 0x000fd200078e00ff */
/*07c0*/ FSETP.NEU.AND P0, PT, R5.reuse, RZ, PT ; /* 0x000000ff0500720b */
/* 0x041fe40003f0d000 */
/*07d0*/ LOP3.LUT R9, R5, 0x80000000, R9, 0x48, !PT ; /* 0x8000000005097812 */
/* 0x000fc800078e4809 */
/*07e0*/ LOP3.LUT R7, R9, R7, RZ, 0xfc, !PT ; /* 0x0000000709077212 */
/* 0x000fce00078efcff */
/*07f0*/ @!P0 BRA 0x9f0 ; /* 0x000001f000008947 */
/* 0x000fea0003800000 */
/*0800*/ IMAD.MOV R5, RZ, RZ, -R16 ; /* 0x000000ffff057224 */
/* 0x000fe200078e0a10 */
/*0810*/ DMUL.RP R6, R14, R6 ; /* 0x000000060e067228 */
/* 0x000e220000008000 */
/*0820*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */
/* 0x000fcc00078e00ff */
/*0830*/ DFMA R4, R12, -R4, R14 ; /* 0x800000040c04722b */
/* 0x000e46000000000e */
/*0840*/ LOP3.LUT R9, R7, R9, RZ, 0x3c, !PT ; /* 0x0000000907097212 */
/* 0x001fc600078e3cff */
/*0850*/ IADD3 R4, -R16, -0x43300000, RZ ; /* 0xbcd0000010047810 */
/* 0x002fc80007ffe1ff */
/*0860*/ FSETP.NEU.AND P0, PT, |R5|, R4, PT ; /* 0x000000040500720b */
/* 0x000fc80003f0d200 */
/*0870*/ FSEL R12, R6, R12, !P0 ; /* 0x0000000c060c7208 */
/* 0x000fe40004000000 */
/*0880*/ FSEL R13, R9, R13, !P0 ; /* 0x0000000d090d7208 */
/* 0x000fe20004000000 */
/*0890*/ BRA 0x9f0 ; /* 0x0000015000007947 */
/* 0x000fea0003800000 */
/*08a0*/ DSETP.NAN.AND P0, PT, R6, R6, PT ; /* 0x000000060600722a */
/* 0x003e1c0003f08000 */
/*08b0*/ @P0 BRA 0x9d0 ; /* 0x0000011000000947 */
/* 0x001fea0003800000 */
/*08c0*/ DSETP.NAN.AND P0, PT, R8, R8, PT ; /* 0x000000080800722a */
/* 0x000e1c0003f08000 */
/*08d0*/ @P0 BRA 0x9a0 ; /* 0x000000c000000947 */
/* 0x001fea0003800000 */
/*08e0*/ ISETP.NE.AND P0, PT, R16, R17, PT ; /* 0x000000111000720c */
/* 0x000fe20003f05270 */
/*08f0*/ IMAD.MOV.U32 R12, RZ, RZ, 0x0 ; /* 0x00000000ff0c7424 */
/* 0x000fe400078e00ff */
/*0900*/ IMAD.MOV.U32 R13, RZ, RZ, -0x80000 ; /* 0xfff80000ff0d7424 */
/* 0x000fd400078e00ff */
/*0910*/ @!P0 BRA 0x9f0 ; /* 0x000000d000008947 */
/* 0x000fea0003800000 */
/*0920*/ ISETP.NE.AND P0, PT, R16, 0x7ff00000, PT ; /* 0x7ff000001000780c */
/* 0x000fe40003f05270 */
/*0930*/ LOP3.LUT R13, R7, 0x80000000, R9, 0x48, !PT ; /* 0x80000000070d7812 */
/* 0x000fe400078e4809 */
/*0940*/ ISETP.EQ.OR P0, PT, R17, RZ, !P0 ; /* 0x000000ff1100720c */
/* 0x000fda0004702670 */
/*0950*/ @P0 LOP3.LUT R4, R13, 0x7ff00000, RZ, 0xfc, !PT ; /* 0x7ff000000d040812 */
/* 0x000fe200078efcff */
/*0960*/ @!P0 IMAD.MOV.U32 R12, RZ, RZ, RZ ; /* 0x000000ffff0c8224 */
/* 0x000fe400078e00ff */
/*0970*/ @P0 IMAD.MOV.U32 R12, RZ, RZ, RZ ; /* 0x000000ffff0c0224 */
/* 0x000fe400078e00ff */
/*0980*/ @P0 IMAD.MOV.U32 R13, RZ, RZ, R4 ; /* 0x000000ffff0d0224 */
/* 0x000fe200078e0004 */
/*0990*/ BRA 0x9f0 ; /* 0x0000005000007947 */
/* 0x000fea0003800000 */
/*09a0*/ LOP3.LUT R13, R9, 0x80000, RZ, 0xfc, !PT ; /* 0x00080000090d7812 */
/* 0x000fe200078efcff */
/*09b0*/ IMAD.MOV.U32 R12, RZ, RZ, R8 ; /* 0x000000ffff0c7224 */
/* 0x000fe200078e0008 */
/*09c0*/ BRA 0x9f0 ; /* 0x0000002000007947 */
/* 0x000fea0003800000 */
/*09d0*/ LOP3.LUT R13, R7, 0x80000, RZ, 0xfc, !PT ; /* 0x00080000070d7812 */
/* 0x000fe200078efcff */
/*09e0*/ IMAD.MOV.U32 R12, RZ, RZ, R6 ; /* 0x000000ffff0c7224 */
/* 0x000fe400078e0006 */
/*09f0*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0a00*/ IMAD.MOV.U32 R4, RZ, RZ, R0 ; /* 0x000000ffff047224 */
/* 0x000fe400078e0000 */
/*0a10*/ IMAD.MOV.U32 R5, RZ, RZ, 0x0 ; /* 0x00000000ff057424 */
/* 0x000fc800078e00ff */
/*0a20*/ RET.REL.NODEC R4 0x0 ; /* 0xfffff5d004007950 */
/* 0x000fea0003c3ffff */
/*0a30*/ BRA 0xa30; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0a40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0aa0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ab0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ac0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ad0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ae0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0af0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13StarRadKernelPdS_S_diiS_S_S_
.globl _Z13StarRadKernelPdS_S_diiS_S_S_
.p2align 8
.type _Z13StarRadKernelPdS_S_diiS_S_S_,@function
_Z13StarRadKernelPdS_S_diiS_S_S_:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x4c
s_load_b64 s[4:5], s[0:1], 0x20
v_bfe_u32 v2, v0, 10, 10
v_and_b32_e32 v4, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s2, 0xffff
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[0:1], null, s15, s2, v[2:3]
v_mad_u64_u32 v[2:3], null, s14, s3, v[4:5]
v_cmp_gt_i32_e32 vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_i32_e64 s2, s5, v2
s_and_b32 s2, s2, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_6
s_add_i32 s2, s4, -1
v_cmp_ne_u32_e32 vcc_lo, 0, v0
v_cmp_ne_u32_e64 s2, s2, v0
v_mov_b32_e32 v3, 0
v_mov_b32_e32 v4, 0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s3, vcc_lo, s2
s_and_saveexec_b32 s2, s3
s_cbranch_execz .LBB0_5
v_add_nc_u32_e32 v3, 1, v0
v_add_nc_u32_e32 v1, -1, v0
s_clause 0x1
s_load_b64 s[6:7], s[0:1], 0x0
s_load_b64 s[8:9], s[0:1], 0x28
v_mad_u64_u32 v[4:5], null, v0, s5, v[2:3]
v_mad_u64_u32 v[6:7], null, v1, s5, v[2:3]
v_mad_u64_u32 v[8:9], null, v3, s5, v[2:3]
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_ashrrev_i32_e32 v5, 31, v4
v_ashrrev_i32_e32 v7, 31, v6
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
v_lshlrev_b64 v[12:13], 3, v[0:1]
v_ashrrev_i32_e32 v9, 31, v8
v_lshlrev_b64 v[4:5], 3, v[4:5]
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
v_lshlrev_b64 v[6:7], 3, v[6:7]
v_lshlrev_b64 v[8:9], 3, v[8:9]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v4, vcc_lo, s6, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_4)
v_add_co_u32 v6, vcc_lo, s6, v6
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v7, vcc_lo
v_add_co_u32 v8, vcc_lo, s6, v8
v_add_co_ci_u32_e32 v9, vcc_lo, s7, v9, vcc_lo
s_clause 0x2
global_load_b64 v[10:11], v[4:5], off
global_load_b64 v[5:6], v[6:7], off
global_load_b64 v[7:8], v[8:9], off
v_ashrrev_i32_e32 v4, 31, v3
v_add_co_u32 v12, vcc_lo, s8, v12
v_add_co_ci_u32_e32 v13, vcc_lo, s9, v13, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 3, v[3:4]
v_add_co_u32 v3, vcc_lo, s8, v3
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s9, v4, vcc_lo
s_clause 0x1
global_load_b64 v[12:13], v[12:13], off
global_load_b64 v[3:4], v[3:4], off
s_waitcnt vmcnt(3)
v_add_f64 v[5:6], v[10:11], -v[5:6]
s_waitcnt vmcnt(2)
v_add_f64 v[7:8], v[7:8], -v[10:11]
s_waitcnt vmcnt(1)
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mul_f64 v[5:6], v[5:6], v[12:13]
s_waitcnt vmcnt(0)
v_mul_f64 v[7:8], v[7:8], v[3:4]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[3:4], v[5:6], v[7:8]
v_cmp_lt_f64_e32 vcc_lo, 0, v[3:4]
v_mov_b32_e32 v3, 0
v_mov_b32_e32 v4, 0
s_and_saveexec_b32 s3, vcc_lo
s_cbranch_execz .LBB0_4
v_add_f64 v[3:4], v[7:8], v[7:8]
v_add_f64 v[7:8], v[5:6], v[7:8]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[3:4], v[5:6], v[3:4]
v_div_scale_f64 v[5:6], null, v[7:8], v[7:8], v[3:4]
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_f64_e32 v[9:10], v[5:6]
s_waitcnt_depctr 0xfff
v_fma_f64 v[11:12], -v[5:6], v[9:10], 1.0
v_fma_f64 v[9:10], v[9:10], v[11:12], v[9:10]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[11:12], -v[5:6], v[9:10], 1.0
v_fma_f64 v[9:10], v[9:10], v[11:12], v[9:10]
v_div_scale_f64 v[11:12], vcc_lo, v[3:4], v[7:8], v[3:4]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[13:14], v[11:12], v[9:10]
v_fma_f64 v[5:6], -v[5:6], v[13:14], v[11:12]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fmas_f64 v[5:6], v[5:6], v[9:10], v[13:14]
v_div_fixup_f64 v[3:4], v[5:6], v[7:8], v[3:4]
.LBB0_4:
s_or_b32 exec_lo, exec_lo, s3
.LBB0_5:
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
s_or_b32 exec_lo, exec_lo, s2
s_load_b64 s[0:1], s[0:1], 0x38
v_mad_u64_u32 v[5:6], null, v2, s4, v[0:1]
v_ashrrev_i32_e32 v6, 31, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 3, v[5:6]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b64 v[0:1], v[3:4], off
.LBB0_6:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13StarRadKernelPdS_S_diiS_S_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 320
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 15
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13StarRadKernelPdS_S_diiS_S_S_, .Lfunc_end0-_Z13StarRadKernelPdS_S_diiS_S_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 8
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 48
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 56
.size: 8
.value_kind: global_buffer
- .offset: 64
.size: 4
.value_kind: hidden_block_count_x
- .offset: 68
.size: 4
.value_kind: hidden_block_count_y
- .offset: 72
.size: 4
.value_kind: hidden_block_count_z
- .offset: 76
.size: 2
.value_kind: hidden_group_size_x
- .offset: 78
.size: 2
.value_kind: hidden_group_size_y
- .offset: 80
.size: 2
.value_kind: hidden_group_size_z
- .offset: 82
.size: 2
.value_kind: hidden_remainder_x
- .offset: 84
.size: 2
.value_kind: hidden_remainder_y
- .offset: 86
.size: 2
.value_kind: hidden_remainder_z
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 112
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 120
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 128
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 320
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13StarRadKernelPdS_S_diiS_S_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13StarRadKernelPdS_S_diiS_S_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 15
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00035ec0_00000000-6_StarRadKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z46__device_stub__Z13StarRadKernelPdS_S_diiS_S_S_PdS_S_diiS_S_S_
.type _Z46__device_stub__Z13StarRadKernelPdS_S_diiS_S_S_PdS_S_diiS_S_S_, @function
_Z46__device_stub__Z13StarRadKernelPdS_S_diiS_S_S_PdS_S_diiS_S_S_:
.LFB2051:
.cfi_startproc
endbr64
subq $216, %rsp
.cfi_def_cfa_offset 224
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
movq %rdx, 40(%rsp)
movsd %xmm0, 32(%rsp)
movl %ecx, 28(%rsp)
movl %r8d, 24(%rsp)
movq %r9, 16(%rsp)
movq 224(%rsp), %rax
movq %rax, 8(%rsp)
movq 232(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 200(%rsp)
xorl %eax, %eax
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rax
movq %rax, 144(%rsp)
leaq 32(%rsp), %rax
movq %rax, 152(%rsp)
leaq 28(%rsp), %rax
movq %rax, 160(%rsp)
leaq 24(%rsp), %rax
movq %rax, 168(%rsp)
leaq 16(%rsp), %rax
movq %rax, 176(%rsp)
leaq 8(%rsp), %rax
movq %rax, 184(%rsp)
movq %rsp, %rax
movq %rax, 192(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 200(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $216, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 232
pushq 72(%rsp)
.cfi_def_cfa_offset 240
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z13StarRadKernelPdS_S_diiS_S_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 224
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z46__device_stub__Z13StarRadKernelPdS_S_diiS_S_S_PdS_S_diiS_S_S_, .-_Z46__device_stub__Z13StarRadKernelPdS_S_diiS_S_S_PdS_S_diiS_S_S_
.globl _Z13StarRadKernelPdS_S_diiS_S_S_
.type _Z13StarRadKernelPdS_S_diiS_S_S_, @function
_Z13StarRadKernelPdS_S_diiS_S_S_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pushq 24(%rsp)
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z46__device_stub__Z13StarRadKernelPdS_S_diiS_S_S_PdS_S_diiS_S_S_
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z13StarRadKernelPdS_S_diiS_S_S_, .-_Z13StarRadKernelPdS_S_diiS_S_S_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z13StarRadKernelPdS_S_diiS_S_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z13StarRadKernelPdS_S_diiS_S_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "StarRadKernel.hip"
.globl _Z28__device_stub__StarRadKernelPdS_S_diiS_S_S_ # -- Begin function _Z28__device_stub__StarRadKernelPdS_S_diiS_S_S_
.p2align 4, 0x90
.type _Z28__device_stub__StarRadKernelPdS_S_diiS_S_S_,@function
_Z28__device_stub__StarRadKernelPdS_S_diiS_S_S_: # @_Z28__device_stub__StarRadKernelPdS_S_diiS_S_S_
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movsd %xmm0, 64(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
movq %r9, 56(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movq %rsp, %rax
movq %rax, 136(%rsp)
leaq 56(%rsp), %rax
movq %rax, 144(%rsp)
leaq 176(%rsp), %rax
movq %rax, 152(%rsp)
leaq 184(%rsp), %rax
movq %rax, 160(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z13StarRadKernelPdS_S_diiS_S_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end0:
.size _Z28__device_stub__StarRadKernelPdS_S_diiS_S_S_, .Lfunc_end0-_Z28__device_stub__StarRadKernelPdS_S_diiS_S_S_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13StarRadKernelPdS_S_diiS_S_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z13StarRadKernelPdS_S_diiS_S_S_,@object # @_Z13StarRadKernelPdS_S_diiS_S_S_
.section .rodata,"a",@progbits
.globl _Z13StarRadKernelPdS_S_diiS_S_S_
.p2align 3, 0x0
_Z13StarRadKernelPdS_S_diiS_S_S_:
.quad _Z28__device_stub__StarRadKernelPdS_S_diiS_S_S_
.size _Z13StarRadKernelPdS_S_diiS_S_S_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z13StarRadKernelPdS_S_diiS_S_S_"
.size .L__unnamed_1, 33
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__StarRadKernelPdS_S_diiS_S_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13StarRadKernelPdS_S_diiS_S_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /* ==================================================================
Programmer: Yicheng Tu (ytu@cse.usf.edu)
The basic SDH algorithm implementation for 3D data
To compile: nvcc SDH.c -o SDH in the C4 lab machines
==================================================================
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <cuda.h>
#define BOX_SIZE 23000 /* size of the data box on one dimension */
/* descriptors for single atom in the tree */
typedef struct atomdesc {
double x_pos;
double y_pos;
double z_pos;
} atom;
typedef struct hist_entry{
//float min;
//float max;
long long d_cnt; /* need a long long type as the count might be huge */
} bucket;
bucket * histogram; /* list of all buckets in the histogram */
bucket * h_gpu_histogram;
bucket * d_gpu_histogram;
bucket * diff_histogram;
long long PDH_acnt; /* total number of data points */
int num_buckets; /* total number of buckets in the histogram */
double PDH_res; /* value of w */
atom * atom_list; /* list of all data points */
atom * d_atom_list;
/* These are for an old way of tracking time */
struct timezone Idunno;
struct timeval startTime, endTime;
/*
distance of two points in the atom_list
*/
double p2p_distance(int ind1, int ind2) {
double x1 = atom_list[ind1].x_pos;
double x2 = atom_list[ind2].x_pos;
double y1 = atom_list[ind1].y_pos;
double y2 = atom_list[ind2].y_pos;
double z1 = atom_list[ind1].z_pos;
double z2 = atom_list[ind2].z_pos;
return sqrt((x1 - x2)*(x1-x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2));
}
//get cuda error
// void cuda_err(cudaError_t e, char in[])
// {
// if (e != cudaSuccess){
// printf("CUDA Error: %s, %s \n", in, cudaGetErrorString(e));
// exit(EXIT_FAILURE);
// }
// }
/*
brute-force SDH solution in a single CPU thread
*/
int PDH_baseline() {
int i, j, h_pos;
double dist;
for(i = 0; i < PDH_acnt; i++) {
for(j = i+1; j < PDH_acnt; j++) {
dist = p2p_distance(i,j);
h_pos = (int) (dist / PDH_res);
histogram[h_pos].d_cnt++;
}
}
return 0;
}
/*
SDH kernel - a really crappy one
*/
__global__ void PDH_kernel(bucket* d_histogram, atom* d_atom_list, long long acnt, double res)
{
int id = blockIdx.x*blockDim.x + threadIdx.x;
int j, h_pos;
double dist;
double x1;
double x2;
double y1;
double y2;
double z1;
double z2;
if(id < acnt)
for(j = id+1; j < acnt; j++)
{
x1 = d_atom_list[id].x_pos;
x2 = d_atom_list[j].x_pos;
y1 = d_atom_list[id].y_pos;
y2 = d_atom_list[j].y_pos;
z1 = d_atom_list[id].z_pos;
z2 = d_atom_list[j].z_pos;
dist = sqrt((x1 - x2)*(x1-x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2));
h_pos = (int) (dist / res);
// __syncthreads();
// d_histogram[h_pos].d_cnt += 1; //very odd that this doesnt work but atomicAdd does... I wonder why
//long story short... it's basically because having synchronized threads
//at this point doesnt prevent race condiions on the increment itself
// __syncthreads();
atomicAdd((unsigned long long int*)&d_histogram[h_pos].d_cnt,1);
}
}
/*
SDH kernel 2.0 - now the goal is to make a better kernel, that is at least 10x faster (at first)
*/
__global__ void PDH_kernel2(bucket* d_histogram, atom* d_atom_list, long long acnt, double res)
{
}
/*
set a checkpoint and show the (natural) running time in seconds
*/
double report_running_time() {
long sec_diff, usec_diff;
gettimeofday(&endTime, &Idunno);
sec_diff = endTime.tv_sec - startTime.tv_sec;
usec_diff= endTime.tv_usec-startTime.tv_usec;
if(usec_diff < 0) {
sec_diff --;
usec_diff += 1000000;
}
printf("Running time for CPU version: %ld.%06ld\n", sec_diff, usec_diff);
return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
double report_running_time_GPU() {
long sec_diff, usec_diff;
gettimeofday(&endTime, &Idunno);
sec_diff = endTime.tv_sec - startTime.tv_sec;
usec_diff= endTime.tv_usec-startTime.tv_usec;
if(usec_diff < 0) {
sec_diff --;
usec_diff += 1000000;
}
printf("Running time for GPU version: %ld.%06ld\n", sec_diff, usec_diff);
return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
/*
print the counts in all buckets of the histogram
*/
void output_histogram(bucket* histogram){
int i;
long long total_cnt = 0;
for(i=0; i< num_buckets; i++) {
if(i%5 == 0) /* we print 5 buckets in a row */
printf("\n%02d: ", i);
printf("%15lld ", histogram[i].d_cnt);
total_cnt += histogram[i].d_cnt;
/* we also want to make sure the total distance count is correct */
if(i == num_buckets - 1)
printf("\n T:%lld \n", total_cnt);
else printf("| ");
}
}
int main(int argc, char **argv)
{
int i;
PDH_acnt = atoi(argv[1]);
PDH_res = atof(argv[2]);
//printf("args are %d and %f\n", PDH_acnt, PDH_res);
num_buckets = (int)(BOX_SIZE * 1.732 / PDH_res) + 1;
histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
atom_list = (atom *)malloc(sizeof(atom)*PDH_acnt);
srand(1);
/* generate data following a uniform distribution */
for(i = 0; i < PDH_acnt; i++) {
atom_list[i].x_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
atom_list[i].y_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
atom_list[i].z_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
}
/* start counting time */
printf("Starting CPU...\n");
gettimeofday(&startTime, &Idunno);
/* call CPU single thread version to compute the histogram */
PDH_baseline();
/* check the total running time */
report_running_time();
/* print out the histogram */
output_histogram(histogram);
printf("Starting GPU...\n");
//cudaDeviceReset();
//gpu code--------------------------------------------------------------------------------
h_gpu_histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
//copy the atomlist over from host to device
cudaMalloc((void**)&d_atom_list, sizeof(atom)*PDH_acnt);
cudaMemcpy(d_atom_list, atom_list, sizeof(atom)*PDH_acnt, cudaMemcpyHostToDevice);
//allocate the histogram data on the device
cudaMalloc((void**)&d_gpu_histogram, sizeof(bucket)*num_buckets);
cudaMemcpy(d_gpu_histogram, h_gpu_histogram, sizeof(bucket)*num_buckets,cudaMemcpyHostToDevice);
//start the timer
gettimeofday(&startTime, &Idunno);
//run the kernel
PDH_kernel<<<ceil(PDH_acnt/256.0), 256>>>(d_gpu_histogram, d_atom_list, PDH_acnt, PDH_res);
//copy the histogram results back from gpu over to cpu
cudaMemcpy(h_gpu_histogram, d_gpu_histogram, sizeof(bucket)*num_buckets, cudaMemcpyDeviceToHost);
//check total running time
report_running_time_GPU();
//print out the resulting histogram from the GPU
output_histogram(h_gpu_histogram);
//difference calculation--------------------------------------------------------------------------------
printf("Difference: \n");
diff_histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
int bi;
for(bi = 0; bi < num_buckets; bi++)
{
diff_histogram[bi].d_cnt = histogram[bi].d_cnt - h_gpu_histogram[bi].d_cnt;
}
output_histogram(diff_histogram);
cudaFree(d_gpu_histogram);
cudaFree(d_atom_list);
free(histogram);
free(atom_list);
free(h_gpu_histogram);
free(diff_histogram);
return 0;
} | code for sm_80
Function : _Z11PDH_kernel2P10hist_entryP8atomdescxd
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z10PDH_kernelP10hist_entryP8atomdescxd
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R8, SR_CTAID.X ; /* 0x0000000000087919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R8, R8, c[0x0][0x0], R3 ; /* 0x0000000008087a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.U32.AND P0, PT, R8, c[0x0][0x170], PT ; /* 0x00005c0008007a0c */
/* 0x000fe40003f06070 */
/*0050*/ SHF.R.S32.HI R2, RZ, 0x1f, R8 ; /* 0x0000001fff027819 */
/* 0x000fc80000011408 */
/*0060*/ ISETP.GE.AND.EX P0, PT, R2, c[0x0][0x174], PT, P0 ; /* 0x00005d0002007a0c */
/* 0x000fda0003f06300 */
/*0070*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0080*/ IADD3 R0, R8, 0x1, RZ ; /* 0x0000000108007810 */
/* 0x000fc80007ffe0ff */
/*0090*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fe40003f06070 */
/*00a0*/ SHF.R.S32.HI R3, RZ, 0x1f, R0 ; /* 0x0000001fff037819 */
/* 0x000fc80000011400 */
/*00b0*/ ISETP.GE.AND.EX P0, PT, R3, c[0x0][0x174], PT, P0 ; /* 0x00005d0003007a0c */
/* 0x000fda0003f06300 */
/*00c0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00d0*/ IMAD.MOV.U32 R9, RZ, RZ, 0x18 ; /* 0x00000018ff097424 */
/* 0x000fe200078e00ff */
/*00e0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00f0*/ IMAD R5, R2, 0x18, RZ ; /* 0x0000001802057824 */
/* 0x000fe400078e02ff */
/*0100*/ IMAD.WIDE.U32 R8, R8, R9, c[0x0][0x168] ; /* 0x00005a0008087625 */
/* 0x000fc800078e0009 */
/*0110*/ IMAD.MOV.U32 R11, RZ, RZ, R3 ; /* 0x000000ffff0b7224 */
/* 0x000fe400078e0003 */
/*0120*/ IMAD.MOV.U32 R10, RZ, RZ, R0 ; /* 0x000000ffff0a7224 */
/* 0x000fe400078e0000 */
/*0130*/ IMAD.IADD R9, R9, 0x1, R5 ; /* 0x0000000109097824 */
/* 0x000fc800078e0205 */
/*0140*/ IMAD.MOV.U32 R3, RZ, RZ, 0x18 ; /* 0x00000018ff037424 */
/* 0x001fe200078e00ff */
/*0150*/ LDG.E.64 R12, [R8.64+0x8] ; /* 0x00000804080c7981 */
/* 0x000ea2000c1e1b00 */
/*0160*/ IMAD R11, R11, 0x18, RZ ; /* 0x000000180b0b7824 */
/* 0x000fe400078e02ff */
/*0170*/ IMAD.WIDE.U32 R2, R10, R3, c[0x0][0x168] ; /* 0x00005a000a027625 */
/* 0x000fe200078e0003 */
/*0180*/ LDG.E.64 R6, [R8.64] ; /* 0x0000000408067981 */
/* 0x000ee6000c1e1b00 */
/*0190*/ IMAD.IADD R3, R3, 0x1, R11 ; /* 0x0000000103037824 */
/* 0x000fe200078e020b */
/*01a0*/ LDG.E.64 R16, [R8.64+0x10] ; /* 0x0000100408107981 */
/* 0x000f28000c1e1b00 */
/*01b0*/ LDG.E.64 R10, [R2.64+0x8] ; /* 0x00000804020a7981 */
/* 0x000ea8000c1e1b00 */
/*01c0*/ LDG.E.64 R4, [R2.64] ; /* 0x0000000402047981 */
/* 0x000ee8000c1e1b00 */
/*01d0*/ LDG.E.64 R14, [R2.64+0x10] ; /* 0x00001004020e7981 */
/* 0x000f22000c1e1b00 */
/*01e0*/ YIELD ; /* 0x0000000000007946 */
/* 0x000fe20003800000 */
/*01f0*/ BSSY B0, 0x3b0 ; /* 0x000001b000007945 */
/* 0x000fe20003800000 */
/*0200*/ DADD R10, -R10, R12 ; /* 0x000000000a0a7229 */
/* 0x004e08000000010c */
/*0210*/ DADD R4, -R4, R6 ; /* 0x0000000004047229 */
/* 0x008fc80000000106 */
/*0220*/ DMUL R10, R10, R10 ; /* 0x0000000a0a0a7228 */
/* 0x001e080000000000 */
/*0230*/ DADD R14, -R14, R16 ; /* 0x000000000e0e7229 */
/* 0x010fc80000000110 */
/*0240*/ DFMA R6, R4, R4, R10 ; /* 0x000000040406722b */
/* 0x001e0c000000000a */
/*0250*/ DFMA R6, R14, R14, R6 ; /* 0x0000000e0e06722b */
/* 0x001e0c0000000006 */
/*0260*/ MUFU.RSQ64H R5, R7 ; /* 0x0000000700057308 */
/* 0x001e280000001c00 */
/*0270*/ IADD3 R4, R7, -0x3500000, RZ ; /* 0xfcb0000007047810 */
/* 0x000fe20007ffe0ff */
/*0280*/ IMAD.MOV.U32 R12, RZ, RZ, 0x0 ; /* 0x00000000ff0c7424 */
/* 0x000fe400078e00ff */
/*0290*/ IMAD.MOV.U32 R13, RZ, RZ, 0x3fd80000 ; /* 0x3fd80000ff0d7424 */
/* 0x000fc600078e00ff */
/*02a0*/ DMUL R10, R4, R4 ; /* 0x00000004040a7228 */
/* 0x001e0c0000000000 */
/*02b0*/ DFMA R10, R6, -R10, 1 ; /* 0x3ff00000060a742b */
/* 0x001e0c000000080a */
/*02c0*/ DFMA R2, R10, R12, 0.5 ; /* 0x3fe000000a02742b */
/* 0x001fc8000000000c */
/*02d0*/ DMUL R10, R4, R10 ; /* 0x0000000a040a7228 */
/* 0x000e220000000000 */
/*02e0*/ ISETP.GE.U32.AND P0, PT, R4, 0x7ca00000, PT ; /* 0x7ca000000400780c */
/* 0x000fca0003f06070 */
/*02f0*/ DFMA R12, R2, R10, R4 ; /* 0x0000000a020c722b */
/* 0x001e0c0000000004 */
/*0300*/ DMUL R14, R6, R12 ; /* 0x0000000c060e7228 */
/* 0x001e080000000000 */
/*0310*/ IADD3 R11, R13, -0x100000, RZ ; /* 0xfff000000d0b7810 */
/* 0x000fe20007ffe0ff */
/*0320*/ IMAD.MOV.U32 R10, RZ, RZ, R12 ; /* 0x000000ffff0a7224 */
/* 0x000fe200078e000c */
/*0330*/ DFMA R16, R14, -R14, R6 ; /* 0x8000000e0e10722b */
/* 0x001e0c0000000006 */
/*0340*/ DFMA R2, R16, R10, R14 ; /* 0x0000000a1002722b */
/* 0x001062000000000e */
/*0350*/ @!P0 BRA 0x3a0 ; /* 0x0000004000008947 */
/* 0x000fea0003800000 */
/*0360*/ MOV R2, 0x380 ; /* 0x0000038000027802 */
/* 0x002fca0000000f00 */
/*0370*/ CALL.REL.NOINC 0xbd0 ; /* 0x0000085000007944 */
/* 0x001fea0003c00000 */
/*0380*/ IMAD.MOV.U32 R2, RZ, RZ, R4 ; /* 0x000000ffff027224 */
/* 0x002fe400078e0004 */
/*0390*/ IMAD.MOV.U32 R3, RZ, RZ, R5 ; /* 0x000000ffff037224 */
/* 0x000fe400078e0005 */
/*03a0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*03b0*/ MUFU.RCP64H R5, c[0x0][0x17c] ; /* 0x00005f0000057b08 */
/* 0x000ea20000001800 */
/*03c0*/ IMAD.MOV.U32 R10, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff0a7624 */
/* 0x001fe200078e00ff */
/*03d0*/ FSETP.GEU.AND P1, PT, |R3|, 6.5827683646048100446e-37, PT ; /* 0x036000000300780b */
/* 0x002fe20003f2e200 */
/*03e0*/ IMAD.MOV.U32 R11, RZ, RZ, c[0x0][0x17c] ; /* 0x00005f00ff0b7624 */
/* 0x000fe200078e00ff */
/*03f0*/ BSSY B0, 0x510 ; /* 0x0000011000007945 */
/* 0x000fe20003800000 */
/*0400*/ IMAD.MOV.U32 R4, RZ, RZ, 0x1 ; /* 0x00000001ff047424 */
/* 0x000fcc00078e00ff */
/*0410*/ DFMA R6, R4, -R10, 1 ; /* 0x3ff000000406742b */
/* 0x004e0c000000080a */
/*0420*/ DFMA R6, R6, R6, R6 ; /* 0x000000060606722b */
/* 0x001e0c0000000006 */
/*0430*/ DFMA R6, R4, R6, R4 ; /* 0x000000060406722b */
/* 0x001e0c0000000004 */
/*0440*/ DFMA R4, R6, -R10, 1 ; /* 0x3ff000000604742b */
/* 0x001e0c000000080a */
/*0450*/ DFMA R4, R6, R4, R6 ; /* 0x000000040604722b */
/* 0x001e0c0000000006 */
/*0460*/ DMUL R6, R4, R2 ; /* 0x0000000204067228 */
/* 0x001e0c0000000000 */
/*0470*/ DFMA R10, R6, -c[0x0][0x178], R2 ; /* 0x80005e00060a7a2b */
/* 0x001e0c0000000002 */
/*0480*/ DFMA R4, R4, R10, R6 ; /* 0x0000000a0404722b */
/* 0x001e140000000006 */
/*0490*/ FFMA R6, RZ, c[0x0][0x17c], R5 ; /* 0x00005f00ff067a23 */
/* 0x001fca0000000005 */
/*04a0*/ FSETP.GT.AND P0, PT, |R6|, 1.469367938527859385e-39, PT ; /* 0x001000000600780b */
/* 0x000fda0003f04200 */
/*04b0*/ @P0 BRA P1, 0x500 ; /* 0x0000004000000947 */
/* 0x000fea0000800000 */
/*04c0*/ IMAD.MOV.U32 R4, RZ, RZ, R2 ; /* 0x000000ffff047224 */
/* 0x000fe200078e0002 */
/*04d0*/ MOV R16, 0x500 ; /* 0x0000050000107802 */
/* 0x000fe20000000f00 */
/*04e0*/ IMAD.MOV.U32 R5, RZ, RZ, R3 ; /* 0x000000ffff057224 */
/* 0x000fe400078e0003 */
/*04f0*/ CALL.REL.NOINC 0x5e0 ; /* 0x000000e000007944 */
/* 0x000fea0003c00000 */
/*0500*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0510*/ F2I.F64.TRUNC R2, R4 ; /* 0x0000000400027311 */
/* 0x000e22000030d100 */
/*0520*/ IADD3 R10, R0, 0x1, RZ ; /* 0x00000001000a7810 */
/* 0x000fe20007ffe0ff */
/*0530*/ IMAD.MOV.U32 R3, RZ, RZ, 0x8 ; /* 0x00000008ff037424 */
/* 0x000fe400078e00ff */
/*0540*/ IMAD.MOV.U32 R6, RZ, RZ, 0x1 ; /* 0x00000001ff067424 */
/* 0x000fe200078e00ff */
/*0550*/ ISETP.GE.U32.AND P0, PT, R10, c[0x0][0x170], PT ; /* 0x00005c000a007a0c */
/* 0x000fe20003f06070 */
/*0560*/ IMAD.MOV.U32 R7, RZ, RZ, 0x0 ; /* 0x00000000ff077424 */
/* 0x000fe200078e00ff */
/*0570*/ SHF.R.S32.HI R11, RZ, 0x1f, R10 ; /* 0x0000001fff0b7819 */
/* 0x000fc8000001140a */
/*0580*/ ISETP.GE.AND.EX P0, PT, R11, c[0x0][0x174], PT, P0 ; /* 0x00005d000b007a0c */
/* 0x000fe20003f06300 */
/*0590*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x001fca00078e0203 */
/*05a0*/ RED.E.ADD.64.STRONG.GPU [R2.64], R6 ; /* 0x000000060200798e */
/* 0x0001e2000c10e584 */
/*05b0*/ IMAD.MOV.U32 R0, RZ, RZ, R10 ; /* 0x000000ffff007224 */
/* 0x000fcc00078e000a */
/*05c0*/ @!P0 BRA 0x140 ; /* 0xfffffb7000008947 */
/* 0x000fea000383ffff */
/*05d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*05e0*/ IMAD.MOV.U32 R18, RZ, RZ, c[0x0][0x17c] ; /* 0x00005f00ff127624 */
/* 0x000fe200078e00ff */
/*05f0*/ FSETP.GEU.AND P2, PT, |R5|.reuse, 1.469367938527859385e-39, PT ; /* 0x001000000500780b */
/* 0x040fe20003f4e200 */
/*0600*/ IMAD.MOV.U32 R17, RZ, RZ, 0x1ca00000 ; /* 0x1ca00000ff117424 */
/* 0x000fe200078e00ff */
/*0610*/ LOP3.LUT R20, R5, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff0000005147812 */
/* 0x000fe200078ec0ff */
/*0620*/ IMAD.MOV.U32 R10, RZ, RZ, 0x1 ; /* 0x00000001ff0a7424 */
/* 0x000fe200078e00ff */
/*0630*/ FSETP.GEU.AND P0, PT, |R18|.reuse, 1.469367938527859385e-39, PT ; /* 0x001000001200780b */
/* 0x040fe20003f0e200 */
/*0640*/ BSSY B1, 0xb90 ; /* 0x0000054000017945 */
/* 0x000fe20003800000 */
/*0650*/ LOP3.LUT R6, R18.reuse, 0x800fffff, RZ, 0xc0, !PT ; /* 0x800fffff12067812 */
/* 0x040fe400078ec0ff */
/*0660*/ LOP3.LUT R19, R18, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff0000012137812 */
/* 0x000fe400078ec0ff */
/*0670*/ LOP3.LUT R7, R6, 0x3ff00000, RZ, 0xfc, !PT ; /* 0x3ff0000006077812 */
/* 0x000fe200078efcff */
/*0680*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff067624 */
/* 0x000fe200078e00ff */
/*0690*/ ISETP.GE.U32.AND P1, PT, R20, R19, PT ; /* 0x000000131400720c */
/* 0x000fe20003f26070 */
/*06a0*/ IMAD.MOV.U32 R22, RZ, RZ, R19 ; /* 0x000000ffff167224 */
/* 0x000fe200078e0013 */
/*06b0*/ MOV R21, R20 ; /* 0x0000001400157202 */
/* 0x000fc40000000f00 */
/*06c0*/ SEL R17, R17, 0x63400000, !P1 ; /* 0x6340000011117807 */
/* 0x000fe20004800000 */
/*06d0*/ @!P0 IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff028624 */
/* 0x000fe400078e00ff */
/*06e0*/ @!P0 IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x17c] ; /* 0x00005f00ff038624 */
/* 0x000fe200078e00ff */
/*06f0*/ @!P2 LOP3.LUT R12, R17, 0x80000000, R5, 0xf8, !PT ; /* 0x80000000110ca812 */
/* 0x000fc800078ef805 */
/*0700*/ @!P2 LOP3.LUT R13, R12, 0x100000, RZ, 0xfc, !PT ; /* 0x001000000c0da812 */
/* 0x000fe200078efcff */
/*0710*/ @!P0 DMUL R6, R2, 8.98846567431157953865e+307 ; /* 0x7fe0000002068828 */
/* 0x000e220000000000 */
/*0720*/ @!P2 IMAD.MOV.U32 R12, RZ, RZ, RZ ; /* 0x000000ffff0ca224 */
/* 0x000fca00078e00ff */
/*0730*/ MUFU.RCP64H R11, R7 ; /* 0x00000007000b7308 */
/* 0x001e280000001800 */
/*0740*/ @!P0 LOP3.LUT R22, R7, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff0000007168812 */
/* 0x000fc800078ec0ff */
/*0750*/ IADD3 R23, R22, -0x1, RZ ; /* 0xffffffff16177810 */
/* 0x000fe20007ffe0ff */
/*0760*/ DFMA R2, R10, -R6, 1 ; /* 0x3ff000000a02742b */
/* 0x001e0c0000000806 */
/*0770*/ DFMA R14, R2, R2, R2 ; /* 0x00000002020e722b */
/* 0x0010640000000002 */
/*0780*/ LOP3.LUT R3, R17, 0x800fffff, R5, 0xf8, !PT ; /* 0x800fffff11037812 */
/* 0x001fe200078ef805 */
/*0790*/ IMAD.MOV.U32 R2, RZ, RZ, R4 ; /* 0x000000ffff027224 */
/* 0x000fc600078e0004 */
/*07a0*/ DFMA R10, R10, R14, R10 ; /* 0x0000000e0a0a722b */
/* 0x002e08000000000a */
/*07b0*/ @!P2 DFMA R2, R2, 2, -R12 ; /* 0x400000000202a82b */
/* 0x000e48000000080c */
/*07c0*/ DFMA R12, R10, -R6, 1 ; /* 0x3ff000000a0c742b */
/* 0x001e0c0000000806 */
/*07d0*/ @!P2 LOP3.LUT R21, R3, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff000000315a812 */
/* 0x002fe200078ec0ff */
/*07e0*/ DFMA R10, R10, R12, R10 ; /* 0x0000000c0a0a722b */
/* 0x001e06000000000a */
/*07f0*/ IADD3 R14, R21, -0x1, RZ ; /* 0xffffffff150e7810 */
/* 0x000fc60007ffe0ff */
/*0800*/ DMUL R12, R10, R2 ; /* 0x000000020a0c7228 */
/* 0x001e220000000000 */
/*0810*/ ISETP.GT.U32.AND P0, PT, R14, 0x7feffffe, PT ; /* 0x7feffffe0e00780c */
/* 0x000fc80003f04070 */
/*0820*/ ISETP.GT.U32.OR P0, PT, R23, 0x7feffffe, P0 ; /* 0x7feffffe1700780c */
/* 0x000fe20000704470 */
/*0830*/ DFMA R14, R12, -R6, R2 ; /* 0x800000060c0e722b */
/* 0x001e0c0000000002 */
/*0840*/ DFMA R14, R10, R14, R12 ; /* 0x0000000e0a0e722b */
/* 0x00104c000000000c */
/*0850*/ @P0 BRA 0xa00 ; /* 0x000001a000000947 */
/* 0x000fea0003800000 */
/*0860*/ IMAD.IADD R19, R20, 0x1, -R19 ; /* 0x0000000114137824 */
/* 0x003fca00078e0a13 */
/*0870*/ IMNMX R19, R19, -0x46a00000, !PT ; /* 0xb960000013137817 */
/* 0x000fc80007800200 */
/*0880*/ IMNMX R4, R19, 0x46a00000, PT ; /* 0x46a0000013047817 */
/* 0x000fca0003800200 */
/*0890*/ IMAD.IADD R17, R4, 0x1, -R17 ; /* 0x0000000104117824 */
/* 0x000fe400078e0a11 */
/*08a0*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */
/* 0x000fc600078e00ff */
/*08b0*/ IADD3 R5, R17, 0x7fe00000, RZ ; /* 0x7fe0000011057810 */
/* 0x000fcc0007ffe0ff */
/*08c0*/ DMUL R10, R14, R4 ; /* 0x000000040e0a7228 */
/* 0x000e140000000000 */
/*08d0*/ FSETP.GTU.AND P0, PT, |R11|, 1.469367938527859385e-39, PT ; /* 0x001000000b00780b */
/* 0x001fda0003f0c200 */
/*08e0*/ @P0 BRA 0xb80 ; /* 0x0000029000000947 */
/* 0x000fea0003800000 */
/*08f0*/ DFMA R2, R14, -R6, R2 ; /* 0x800000060e02722b */
/* 0x000e220000000002 */
/*0900*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */
/* 0x000fd200078e00ff */
/*0910*/ FSETP.NEU.AND P0, PT, R3.reuse, RZ, PT ; /* 0x000000ff0300720b */
/* 0x041fe40003f0d000 */
/*0920*/ LOP3.LUT R2, R3, c[0x0][0x17c], RZ, 0x3c, !PT ; /* 0x00005f0003027a12 */
/* 0x000fc800078e3cff */
/*0930*/ LOP3.LUT R7, R2, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000002077812 */
/* 0x000fc800078ec0ff */
/*0940*/ LOP3.LUT R5, R7, R5, RZ, 0xfc, !PT ; /* 0x0000000507057212 */
/* 0x000fc600078efcff */
/*0950*/ @!P0 BRA 0xb80 ; /* 0x0000022000008947 */
/* 0x000fea0003800000 */
/*0960*/ IMAD.MOV R3, RZ, RZ, -R17 ; /* 0x000000ffff037224 */
/* 0x000fe200078e0a11 */
/*0970*/ DMUL.RP R4, R14, R4 ; /* 0x000000040e047228 */
/* 0x000e220000008000 */
/*0980*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */
/* 0x000fcc00078e00ff */
/*0990*/ DFMA R2, R10, -R2, R14 ; /* 0x800000020a02722b */
/* 0x000e46000000000e */
/*09a0*/ LOP3.LUT R7, R5, R7, RZ, 0x3c, !PT ; /* 0x0000000705077212 */
/* 0x001fc600078e3cff */
/*09b0*/ IADD3 R2, -R17, -0x43300000, RZ ; /* 0xbcd0000011027810 */
/* 0x002fc80007ffe1ff */
/*09c0*/ FSETP.NEU.AND P0, PT, |R3|, R2, PT ; /* 0x000000020300720b */
/* 0x000fc80003f0d200 */
/*09d0*/ FSEL R10, R4, R10, !P0 ; /* 0x0000000a040a7208 */
/* 0x000fe40004000000 */
/*09e0*/ FSEL R11, R7, R11, !P0 ; /* 0x0000000b070b7208 */
/* 0x000fe20004000000 */
/*09f0*/ BRA 0xb80 ; /* 0x0000018000007947 */
/* 0x000fea0003800000 */
/*0a00*/ DSETP.NAN.AND P0, PT, R4, R4, PT ; /* 0x000000040400722a */
/* 0x003e1c0003f08000 */
/*0a10*/ @P0 BRA 0xb60 ; /* 0x0000014000000947 */
/* 0x001fea0003800000 */
/*0a20*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff027624 */
/* 0x000fe400078e00ff */
/*0a30*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x17c] ; /* 0x00005f00ff037624 */
/* 0x000fcc00078e00ff */
/*0a40*/ DSETP.NAN.AND P0, PT, R2, c[0x0][0x178], PT ; /* 0x00005e000200762a */
/* 0x000e1c0003f08000 */
/*0a50*/ @P0 BRA 0xb30 ; /* 0x000000d000000947 */
/* 0x001fea0003800000 */
/*0a60*/ ISETP.NE.AND P0, PT, R21, R22, PT ; /* 0x000000161500720c */
/* 0x000fe20003f05270 */
/*0a70*/ IMAD.MOV.U32 R10, RZ, RZ, 0x0 ; /* 0x00000000ff0a7424 */
/* 0x000fe400078e00ff */
/*0a80*/ IMAD.MOV.U32 R11, RZ, RZ, -0x80000 ; /* 0xfff80000ff0b7424 */
/* 0x000fd400078e00ff */
/*0a90*/ @!P0 BRA 0xb80 ; /* 0x000000e000008947 */
/* 0x000fea0003800000 */
/*0aa0*/ ISETP.NE.AND P0, PT, R21, 0x7ff00000, PT ; /* 0x7ff000001500780c */
/* 0x000fe40003f05270 */
/*0ab0*/ LOP3.LUT R4, R5, c[0x0][0x17c], RZ, 0x3c, !PT ; /* 0x00005f0005047a12 */
/* 0x000fe400078e3cff */
/*0ac0*/ ISETP.EQ.OR P0, PT, R22, RZ, !P0 ; /* 0x000000ff1600720c */
/* 0x000fe40004702670 */
/*0ad0*/ LOP3.LUT R11, R4, 0x80000000, RZ, 0xc0, !PT ; /* 0x80000000040b7812 */
/* 0x000fd600078ec0ff */
/*0ae0*/ @P0 LOP3.LUT R2, R11, 0x7ff00000, RZ, 0xfc, !PT ; /* 0x7ff000000b020812 */
/* 0x000fe200078efcff */
/*0af0*/ @!P0 IMAD.MOV.U32 R10, RZ, RZ, RZ ; /* 0x000000ffff0a8224 */
/* 0x000fe400078e00ff */
/*0b00*/ @P0 IMAD.MOV.U32 R10, RZ, RZ, RZ ; /* 0x000000ffff0a0224 */
/* 0x000fe400078e00ff */
/*0b10*/ @P0 IMAD.MOV.U32 R11, RZ, RZ, R2 ; /* 0x000000ffff0b0224 */
/* 0x000fe200078e0002 */
/*0b20*/ BRA 0xb80 ; /* 0x0000005000007947 */
/* 0x000fea0003800000 */
/*0b30*/ LOP3.LUT R11, R18, 0x80000, RZ, 0xfc, !PT ; /* 0x00080000120b7812 */
/* 0x000fe200078efcff */
/*0b40*/ IMAD.MOV.U32 R10, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff0a7624 */
/* 0x000fe200078e00ff */
/*0b50*/ BRA 0xb80 ; /* 0x0000002000007947 */
/* 0x000fea0003800000 */
/*0b60*/ LOP3.LUT R11, R5, 0x80000, RZ, 0xfc, !PT ; /* 0x00080000050b7812 */
/* 0x000fe400078efcff */
/*0b70*/ MOV R10, R4 ; /* 0x00000004000a7202 */
/* 0x000fc40000000f00 */
/*0b80*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0b90*/ IMAD.MOV.U32 R17, RZ, RZ, 0x0 ; /* 0x00000000ff117424 */
/* 0x000fe400078e00ff */
/*0ba0*/ IMAD.MOV.U32 R4, RZ, RZ, R10 ; /* 0x000000ffff047224 */
/* 0x000fc400078e000a */
/*0bb0*/ IMAD.MOV.U32 R5, RZ, RZ, R11 ; /* 0x000000ffff057224 */
/* 0x000fe200078e000b */
/*0bc0*/ RET.REL.NODEC R16 0x0 ; /* 0xfffff43010007950 */
/* 0x000fec0003c3ffff */
/*0bd0*/ ISETP.GE.U32.AND P0, PT, R4, -0x3400000, PT ; /* 0xfcc000000400780c */
/* 0x000fe20003f06070 */
/*0be0*/ BSSY B1, 0xe50 ; /* 0x0000026000017945 */
/* 0x000fe20003800000 */
/*0bf0*/ IMAD.MOV.U32 R10, RZ, RZ, R12 ; /* 0x000000ffff0a7224 */
/* 0x000fe400078e000c */
/*0c00*/ IMAD.MOV.U32 R4, RZ, RZ, R16 ; /* 0x000000ffff047224 */
/* 0x000fe400078e0010 */
/*0c10*/ IMAD.MOV.U32 R5, RZ, RZ, R17 ; /* 0x000000ffff057224 */
/* 0x000fce00078e0011 */
/*0c20*/ @!P0 BRA 0xcb0 ; /* 0x0000008000008947 */
/* 0x000fea0003800000 */
/*0c30*/ DFMA.RM R4, R4, R10, R14 ; /* 0x0000000a0404722b */
/* 0x000e14000000400e */
/*0c40*/ IADD3 R10, P0, R4, 0x1, RZ ; /* 0x00000001040a7810 */
/* 0x001fca0007f1e0ff */
/*0c50*/ IMAD.X R11, RZ, RZ, R5, P0 ; /* 0x000000ffff0b7224 */
/* 0x000fcc00000e0605 */
/*0c60*/ DFMA.RP R6, -R4, R10, R6 ; /* 0x0000000a0406722b */
/* 0x000e0c0000008106 */
/*0c70*/ DSETP.GT.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600722a */
/* 0x001e0c0003f04000 */
/*0c80*/ FSEL R4, R10, R4, P0 ; /* 0x000000040a047208 */
/* 0x001fe40000000000 */
/*0c90*/ FSEL R5, R11, R5, P0 ; /* 0x000000050b057208 */
/* 0x000fe20000000000 */
/*0ca0*/ BRA 0xe40 ; /* 0x0000019000007947 */
/* 0x000fea0003800000 */
/*0cb0*/ DSETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600722a */
/* 0x000e1c0003f05000 */
/*0cc0*/ @!P0 BRA 0xe30 ; /* 0x0000016000008947 */
/* 0x001fea0003800000 */
/*0cd0*/ ISETP.GE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fda0003f06270 */
/*0ce0*/ @!P0 IMAD.MOV.U32 R4, RZ, RZ, 0x0 ; /* 0x00000000ff048424 */
/* 0x000fe400078e00ff */
/*0cf0*/ @!P0 IMAD.MOV.U32 R5, RZ, RZ, -0x80000 ; /* 0xfff80000ff058424 */
/* 0x000fe200078e00ff */
/*0d00*/ @!P0 BRA 0xe40 ; /* 0x0000013000008947 */
/* 0x000fea0003800000 */
/*0d10*/ ISETP.GT.AND P0, PT, R7, 0x7fefffff, PT ; /* 0x7fefffff0700780c */
/* 0x000fda0003f04270 */
/*0d20*/ @P0 BRA 0xe30 ; /* 0x0000010000000947 */
/* 0x000fea0003800000 */
/*0d30*/ DMUL R4, R6, 8.11296384146066816958e+31 ; /* 0x4690000006047828 */
/* 0x0000620000000000 */
/*0d40*/ IMAD.MOV.U32 R12, RZ, RZ, 0x0 ; /* 0x00000000ff0c7424 */
/* 0x000fe400078e00ff */
/*0d50*/ IMAD.MOV.U32 R6, RZ, RZ, RZ ; /* 0x000000ffff067224 */
/* 0x001fe400078e00ff */
/*0d60*/ IMAD.MOV.U32 R13, RZ, RZ, 0x3fd80000 ; /* 0x3fd80000ff0d7424 */
/* 0x000fe200078e00ff */
/*0d70*/ MUFU.RSQ64H R7, R5 ; /* 0x0000000500077308 */
/* 0x002e260000001c00 */
/*0d80*/ DMUL R10, R6, R6 ; /* 0x00000006060a7228 */
/* 0x001e0c0000000000 */
/*0d90*/ DFMA R10, R4, -R10, 1 ; /* 0x3ff00000040a742b */
/* 0x001e0c000000080a */
/*0da0*/ DFMA R12, R10, R12, 0.5 ; /* 0x3fe000000a0c742b */
/* 0x001fc8000000000c */
/*0db0*/ DMUL R10, R6, R10 ; /* 0x0000000a060a7228 */
/* 0x000e0c0000000000 */
/*0dc0*/ DFMA R10, R12, R10, R6 ; /* 0x0000000a0c0a722b */
/* 0x001e0c0000000006 */
/*0dd0*/ DMUL R6, R4, R10 ; /* 0x0000000a04067228 */
/* 0x0010480000000000 */
/*0de0*/ IADD3 R11, R11, -0x100000, RZ ; /* 0xfff000000b0b7810 */
/* 0x001fe40007ffe0ff */
/*0df0*/ DFMA R12, R6, -R6, R4 ; /* 0x80000006060c722b */
/* 0x002e0c0000000004 */
/*0e00*/ DFMA R4, R10, R12, R6 ; /* 0x0000000c0a04722b */
/* 0x001e140000000006 */
/*0e10*/ IADD3 R5, R5, -0x3500000, RZ ; /* 0xfcb0000005057810 */
/* 0x001fe20007ffe0ff */
/*0e20*/ BRA 0xe40 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*0e30*/ DADD R4, R6, R6 ; /* 0x0000000006047229 */
/* 0x00004c0000000006 */
/*0e40*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0e50*/ IMAD.MOV.U32 R3, RZ, RZ, 0x0 ; /* 0x00000000ff037424 */
/* 0x000fc800078e00ff */
/*0e60*/ RET.REL.NODEC R2 0x0 ; /* 0xfffff19002007950 */
/* 0x000fea0003c3ffff */
/*0e70*/ BRA 0xe70; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0e80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ea0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0eb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ec0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ed0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ee0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ef0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /* ==================================================================
Programmer: Yicheng Tu (ytu@cse.usf.edu)
The basic SDH algorithm implementation for 3D data
To compile: nvcc SDH.c -o SDH in the C4 lab machines
==================================================================
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <cuda.h>
#define BOX_SIZE 23000 /* size of the data box on one dimension */
/* descriptors for single atom in the tree */
typedef struct atomdesc {
double x_pos;
double y_pos;
double z_pos;
} atom;
typedef struct hist_entry{
//float min;
//float max;
long long d_cnt; /* need a long long type as the count might be huge */
} bucket;
bucket * histogram; /* list of all buckets in the histogram */
bucket * h_gpu_histogram;
bucket * d_gpu_histogram;
bucket * diff_histogram;
long long PDH_acnt; /* total number of data points */
int num_buckets; /* total number of buckets in the histogram */
double PDH_res; /* value of w */
atom * atom_list; /* list of all data points */
atom * d_atom_list;
/* These are for an old way of tracking time */
struct timezone Idunno;
struct timeval startTime, endTime;
/*
distance of two points in the atom_list
*/
double p2p_distance(int ind1, int ind2) {
double x1 = atom_list[ind1].x_pos;
double x2 = atom_list[ind2].x_pos;
double y1 = atom_list[ind1].y_pos;
double y2 = atom_list[ind2].y_pos;
double z1 = atom_list[ind1].z_pos;
double z2 = atom_list[ind2].z_pos;
return sqrt((x1 - x2)*(x1-x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2));
}
//get cuda error
// void cuda_err(cudaError_t e, char in[])
// {
// if (e != cudaSuccess){
// printf("CUDA Error: %s, %s \n", in, cudaGetErrorString(e));
// exit(EXIT_FAILURE);
// }
// }
/*
brute-force SDH solution in a single CPU thread
*/
int PDH_baseline() {
int i, j, h_pos;
double dist;
for(i = 0; i < PDH_acnt; i++) {
for(j = i+1; j < PDH_acnt; j++) {
dist = p2p_distance(i,j);
h_pos = (int) (dist / PDH_res);
histogram[h_pos].d_cnt++;
}
}
return 0;
}
/*
SDH kernel - a really crappy one
*/
__global__ void PDH_kernel(bucket* d_histogram, atom* d_atom_list, long long acnt, double res)
{
int id = blockIdx.x*blockDim.x + threadIdx.x;
int j, h_pos;
double dist;
double x1;
double x2;
double y1;
double y2;
double z1;
double z2;
if(id < acnt)
for(j = id+1; j < acnt; j++)
{
x1 = d_atom_list[id].x_pos;
x2 = d_atom_list[j].x_pos;
y1 = d_atom_list[id].y_pos;
y2 = d_atom_list[j].y_pos;
z1 = d_atom_list[id].z_pos;
z2 = d_atom_list[j].z_pos;
dist = sqrt((x1 - x2)*(x1-x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2));
h_pos = (int) (dist / res);
// __syncthreads();
// d_histogram[h_pos].d_cnt += 1; //very odd that this doesnt work but atomicAdd does... I wonder why
//long story short... it's basically because having synchronized threads
//at this point doesnt prevent race condiions on the increment itself
// __syncthreads();
atomicAdd((unsigned long long int*)&d_histogram[h_pos].d_cnt,1);
}
}
/*
SDH kernel 2.0 - now the goal is to make a better kernel, that is at least 10x faster (at first)
*/
__global__ void PDH_kernel2(bucket* d_histogram, atom* d_atom_list, long long acnt, double res)
{
}
/*
set a checkpoint and show the (natural) running time in seconds
*/
double report_running_time() {
long sec_diff, usec_diff;
gettimeofday(&endTime, &Idunno);
sec_diff = endTime.tv_sec - startTime.tv_sec;
usec_diff= endTime.tv_usec-startTime.tv_usec;
if(usec_diff < 0) {
sec_diff --;
usec_diff += 1000000;
}
printf("Running time for CPU version: %ld.%06ld\n", sec_diff, usec_diff);
return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
double report_running_time_GPU() {
long sec_diff, usec_diff;
gettimeofday(&endTime, &Idunno);
sec_diff = endTime.tv_sec - startTime.tv_sec;
usec_diff= endTime.tv_usec-startTime.tv_usec;
if(usec_diff < 0) {
sec_diff --;
usec_diff += 1000000;
}
printf("Running time for GPU version: %ld.%06ld\n", sec_diff, usec_diff);
return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
/*
print the counts in all buckets of the histogram
*/
void output_histogram(bucket* histogram){
int i;
long long total_cnt = 0;
for(i=0; i< num_buckets; i++) {
if(i%5 == 0) /* we print 5 buckets in a row */
printf("\n%02d: ", i);
printf("%15lld ", histogram[i].d_cnt);
total_cnt += histogram[i].d_cnt;
/* we also want to make sure the total distance count is correct */
if(i == num_buckets - 1)
printf("\n T:%lld \n", total_cnt);
else printf("| ");
}
}
int main(int argc, char **argv)
{
int i;
PDH_acnt = atoi(argv[1]);
PDH_res = atof(argv[2]);
//printf("args are %d and %f\n", PDH_acnt, PDH_res);
num_buckets = (int)(BOX_SIZE * 1.732 / PDH_res) + 1;
histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
atom_list = (atom *)malloc(sizeof(atom)*PDH_acnt);
srand(1);
/* generate data following a uniform distribution */
for(i = 0; i < PDH_acnt; i++) {
atom_list[i].x_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
atom_list[i].y_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
atom_list[i].z_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
}
/* start counting time */
printf("Starting CPU...\n");
gettimeofday(&startTime, &Idunno);
/* call CPU single thread version to compute the histogram */
PDH_baseline();
/* check the total running time */
report_running_time();
/* print out the histogram */
output_histogram(histogram);
printf("Starting GPU...\n");
//cudaDeviceReset();
//gpu code--------------------------------------------------------------------------------
h_gpu_histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
//copy the atomlist over from host to device
cudaMalloc((void**)&d_atom_list, sizeof(atom)*PDH_acnt);
cudaMemcpy(d_atom_list, atom_list, sizeof(atom)*PDH_acnt, cudaMemcpyHostToDevice);
//allocate the histogram data on the device
cudaMalloc((void**)&d_gpu_histogram, sizeof(bucket)*num_buckets);
cudaMemcpy(d_gpu_histogram, h_gpu_histogram, sizeof(bucket)*num_buckets,cudaMemcpyHostToDevice);
//start the timer
gettimeofday(&startTime, &Idunno);
//run the kernel
PDH_kernel<<<ceil(PDH_acnt/256.0), 256>>>(d_gpu_histogram, d_atom_list, PDH_acnt, PDH_res);
//copy the histogram results back from gpu over to cpu
cudaMemcpy(h_gpu_histogram, d_gpu_histogram, sizeof(bucket)*num_buckets, cudaMemcpyDeviceToHost);
//check total running time
report_running_time_GPU();
//print out the resulting histogram from the GPU
output_histogram(h_gpu_histogram);
//difference calculation--------------------------------------------------------------------------------
printf("Difference: \n");
diff_histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
int bi;
for(bi = 0; bi < num_buckets; bi++)
{
diff_histogram[bi].d_cnt = histogram[bi].d_cnt - h_gpu_histogram[bi].d_cnt;
}
output_histogram(diff_histogram);
cudaFree(d_gpu_histogram);
cudaFree(d_atom_list);
free(histogram);
free(atom_list);
free(h_gpu_histogram);
free(diff_histogram);
return 0;
} | .file "tmpxft_0015a26d_00000000-6_proj1-tanmay.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2065:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2065:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z12p2p_distanceii
.type _Z12p2p_distanceii, @function
_Z12p2p_distanceii:
.LFB2057:
.cfi_startproc
endbr64
movq atom_list(%rip), %rax
movslq %edi, %rdi
leaq (%rdi,%rdi,2), %rdx
leaq (%rax,%rdx,8), %rdx
movslq %esi, %rsi
leaq (%rsi,%rsi,2), %rcx
leaq (%rax,%rcx,8), %rax
movsd (%rdx), %xmm1
subsd (%rax), %xmm1
movsd 8(%rdx), %xmm2
subsd 8(%rax), %xmm2
movsd 16(%rdx), %xmm0
subsd 16(%rax), %xmm0
mulsd %xmm1, %xmm1
mulsd %xmm2, %xmm2
addsd %xmm2, %xmm1
mulsd %xmm0, %xmm0
addsd %xmm1, %xmm0
sqrtsd %xmm0, %xmm0
ret
.cfi_endproc
.LFE2057:
.size _Z12p2p_distanceii, .-_Z12p2p_distanceii
.globl _Z12PDH_baselinev
.type _Z12PDH_baselinev, @function
_Z12PDH_baselinev:
.LFB2058:
.cfi_startproc
endbr64
cmpq $0, PDH_acnt(%rip)
jle .L11
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
movl $1, %r13d
movl $0, %r14d
.L7:
movl %r14d, %r12d
movl %r13d, %ebx
cmpq %r13, PDH_acnt(%rip)
jle .L5
movq %r13, %rbp
.L6:
movl %ebx, %esi
movl %r12d, %edi
call _Z12p2p_distanceii
divsd PDH_res(%rip), %xmm0
cvttsd2sil %xmm0, %eax
cltq
movq histogram(%rip), %rdx
addq $1, (%rdx,%rax,8)
addl $1, %ebx
movq PDH_acnt(%rip), %rax
addq $1, %rbp
cmpq %rbp, %rax
jg .L6
addq $1, %r14
addq $1, %r13
cmpq %r14, %rax
jg .L7
.L5:
movl $0, %eax
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
.cfi_restore 14
movl $0, %eax
ret
.cfi_endproc
.LFE2058:
.size _Z12PDH_baselinev, .-_Z12PDH_baselinev
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "Running time for CPU version: %ld.%06ld\n"
.text
.globl _Z19report_running_timev
.type _Z19report_running_timev, @function
_Z19report_running_timev:
.LFB2059:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
leaq Idunno(%rip), %rsi
leaq endTime(%rip), %rdi
call gettimeofday@PLT
movq endTime(%rip), %rbp
subq startTime(%rip), %rbp
movq 8+endTime(%rip), %rbx
subq 8+startTime(%rip), %rbx
js .L17
.L15:
movq %rbx, %rcx
movq %rbp, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
pxor %xmm0, %xmm0
cvtsi2sdq %rbx, %xmm0
divsd .LC1(%rip), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq %rbp, %xmm1
addsd %xmm1, %xmm0
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
subq $1, %rbp
addq $1000000, %rbx
jmp .L15
.cfi_endproc
.LFE2059:
.size _Z19report_running_timev, .-_Z19report_running_timev
.section .rodata.str1.8
.align 8
.LC2:
.string "Running time for GPU version: %ld.%06ld\n"
.text
.globl _Z23report_running_time_GPUv
.type _Z23report_running_time_GPUv, @function
_Z23report_running_time_GPUv:
.LFB2060:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
leaq Idunno(%rip), %rsi
leaq endTime(%rip), %rdi
call gettimeofday@PLT
movq endTime(%rip), %rbp
subq startTime(%rip), %rbp
movq 8+endTime(%rip), %rbx
subq 8+startTime(%rip), %rbx
js .L21
.L19:
movq %rbx, %rcx
movq %rbp, %rdx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
pxor %xmm0, %xmm0
cvtsi2sdq %rbx, %xmm0
divsd .LC1(%rip), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq %rbp, %xmm1
addsd %xmm1, %xmm0
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L21:
.cfi_restore_state
subq $1, %rbp
addq $1000000, %rbx
jmp .L19
.cfi_endproc
.LFE2060:
.size _Z23report_running_time_GPUv, .-_Z23report_running_time_GPUv
.section .rodata.str1.1,"aMS",@progbits,1
.LC3:
.string "\n%02d: "
.LC4:
.string "%15lld "
.LC5:
.string "\n T:%lld \n"
.LC6:
.string "| "
.text
.globl _Z16output_histogramP10hist_entry
.type _Z16output_histogramP10hist_entry, @function
_Z16output_histogramP10hist_entry:
.LFB2061:
.cfi_startproc
endbr64
cmpl $0, num_buckets(%rip)
jle .L30
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $8, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %rbp
movl $0, %ebx
movl $0, %r12d
leaq .LC4(%rip), %r13
leaq .LC6(%rip), %r14
leaq .LC5(%rip), %r15
jmp .L27
.L34:
movl %ebx, %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L24
.L35:
movq %r15, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L26:
addq $1, %rbx
cmpl %ebx, num_buckets(%rip)
jle .L33
.L27:
movslq %ebx, %rax
imulq $1717986919, %rax, %rax
sarq $33, %rax
movl %ebx, %edx
sarl $31, %edx
subl %edx, %eax
leal (%rax,%rax,4), %eax
cmpl %ebx, %eax
je .L34
.L24:
movq 0(%rbp,%rbx,8), %rdx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %r12, %rdx
addq 0(%rbp,%rbx,8), %rdx
movq %rdx, %r12
movl num_buckets(%rip), %eax
subl $1, %eax
cmpl %ebx, %eax
je .L35
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L26
.L33:
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L30:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
.cfi_restore 14
.cfi_restore 15
ret
.cfi_endproc
.LFE2061:
.size _Z16output_histogramP10hist_entry, .-_Z16output_histogramP10hist_entry
.globl _Z53__device_stub__Z10PDH_kernelP10hist_entryP8atomdescxdP10hist_entryP8atomdescxd
.type _Z53__device_stub__Z10PDH_kernelP10hist_entryP8atomdescxdP10hist_entryP8atomdescxd, @function
_Z53__device_stub__Z10PDH_kernelP10hist_entryP8atomdescxdP10hist_entryP8atomdescxd:
.LFB2087:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movsd %xmm0, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L40
.L36:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L41
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L40:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10PDH_kernelP10hist_entryP8atomdescxd(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L36
.L41:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2087:
.size _Z53__device_stub__Z10PDH_kernelP10hist_entryP8atomdescxdP10hist_entryP8atomdescxd, .-_Z53__device_stub__Z10PDH_kernelP10hist_entryP8atomdescxdP10hist_entryP8atomdescxd
.globl _Z10PDH_kernelP10hist_entryP8atomdescxd
.type _Z10PDH_kernelP10hist_entryP8atomdescxd, @function
_Z10PDH_kernelP10hist_entryP8atomdescxd:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z53__device_stub__Z10PDH_kernelP10hist_entryP8atomdescxdP10hist_entryP8atomdescxd
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _Z10PDH_kernelP10hist_entryP8atomdescxd, .-_Z10PDH_kernelP10hist_entryP8atomdescxd
.section .rodata.str1.1
.LC10:
.string "Starting CPU...\n"
.LC11:
.string "Starting GPU...\n"
.LC16:
.string "Difference: \n"
.text
.globl main
.type main, @function
main:
.LFB2062:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $40, %rsp
.cfi_def_cfa_offset 64
movq %rsi, %rbx
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
cltq
movq %rax, PDH_acnt(%rip)
movq 16(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, PDH_res(%rip)
movsd .LC7(%rip), %xmm1
divsd %xmm0, %xmm1
cvttsd2sil %xmm1, %edi
addl $1, %edi
movl %edi, num_buckets(%rip)
movslq %edi, %rdi
salq $3, %rdi
call malloc@PLT
movq %rax, histogram(%rip)
movq PDH_acnt(%rip), %rax
leaq (%rax,%rax,2), %rdi
salq $3, %rdi
call malloc@PLT
movq %rax, atom_list(%rip)
movl $1, %edi
call srand@PLT
cmpq $0, PDH_acnt(%rip)
jle .L45
movl $0, %ebx
movl $0, %ebp
.L46:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
divsd .LC8(%rip), %xmm0
mulsd .LC9(%rip), %xmm0
movq atom_list(%rip), %rax
movsd %xmm0, (%rax,%rbx)
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
divsd .LC8(%rip), %xmm0
mulsd .LC9(%rip), %xmm0
movq atom_list(%rip), %rax
movsd %xmm0, 8(%rax,%rbx)
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
divsd .LC8(%rip), %xmm0
mulsd .LC9(%rip), %xmm0
movq atom_list(%rip), %rax
movsd %xmm0, 16(%rax,%rbx)
addq $1, %rbp
addq $24, %rbx
cmpq %rbp, PDH_acnt(%rip)
jg .L46
.L45:
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq Idunno(%rip), %rbp
movq %rbp, %rsi
leaq startTime(%rip), %rbx
movq %rbx, %rdi
call gettimeofday@PLT
call _Z12PDH_baselinev
call _Z19report_running_timev
movq histogram(%rip), %rdi
call _Z16output_histogramP10hist_entry
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movslq num_buckets(%rip), %rdi
salq $3, %rdi
call malloc@PLT
movq %rax, h_gpu_histogram(%rip)
movq PDH_acnt(%rip), %rax
leaq (%rax,%rax,2), %rsi
salq $3, %rsi
leaq d_atom_list(%rip), %rdi
call cudaMalloc@PLT
movq PDH_acnt(%rip), %rax
leaq (%rax,%rax,2), %rdx
salq $3, %rdx
movl $1, %ecx
movq atom_list(%rip), %rsi
movq d_atom_list(%rip), %rdi
call cudaMemcpy@PLT
movslq num_buckets(%rip), %rsi
salq $3, %rsi
leaq d_gpu_histogram(%rip), %rdi
call cudaMalloc@PLT
movslq num_buckets(%rip), %rdx
salq $3, %rdx
movl $1, %ecx
movq h_gpu_histogram(%rip), %rsi
movq d_gpu_histogram(%rip), %rdi
call cudaMemcpy@PLT
movq %rbp, %rsi
movq %rbx, %rdi
call gettimeofday@PLT
movl $256, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
pxor %xmm0, %xmm0
cvtsi2sdq PDH_acnt(%rip), %xmm0
mulsd .LC12(%rip), %xmm0
movapd %xmm0, %xmm3
movsd .LC17(%rip), %xmm2
movapd %xmm0, %xmm1
andpd %xmm2, %xmm1
movsd .LC13(%rip), %xmm4
ucomisd %xmm1, %xmm4
jbe .L47
cvttsd2siq %xmm0, %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
cmpnlesd %xmm1, %xmm3
movsd .LC15(%rip), %xmm4
andpd %xmm4, %xmm3
addsd %xmm1, %xmm3
andnpd %xmm0, %xmm2
orpd %xmm2, %xmm3
.L47:
cvttsd2siq %xmm3, %rax
movl %eax, 8(%rsp)
movl $1, 12(%rsp)
movl $1, 16(%rsp)
movl 28(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L54
.L48:
movslq num_buckets(%rip), %rdx
salq $3, %rdx
movl $2, %ecx
movq d_gpu_histogram(%rip), %rsi
movq h_gpu_histogram(%rip), %rdi
call cudaMemcpy@PLT
call _Z23report_running_time_GPUv
movq h_gpu_histogram(%rip), %rdi
call _Z16output_histogramP10hist_entry
leaq .LC16(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl num_buckets(%rip), %ebp
movslq %ebp, %rbx
salq $3, %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, diff_histogram(%rip)
testl %ebp, %ebp
jle .L49
movl $0, %eax
.L50:
movq histogram(%rip), %rdx
movq (%rdx,%rax), %rdx
movq h_gpu_histogram(%rip), %rcx
subq (%rcx,%rax), %rdx
movq diff_histogram(%rip), %rcx
movq %rdx, (%rcx,%rax)
addq $8, %rax
cmpq %rax, %rbx
jne .L50
.L49:
movq diff_histogram(%rip), %rdi
call _Z16output_histogramP10hist_entry
movq d_gpu_histogram(%rip), %rdi
call cudaFree@PLT
movq d_atom_list(%rip), %rdi
call cudaFree@PLT
movq histogram(%rip), %rdi
call free@PLT
movq atom_list(%rip), %rdi
call free@PLT
movq h_gpu_histogram(%rip), %rdi
call free@PLT
movq diff_histogram(%rip), %rdi
call free@PLT
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L54:
.cfi_restore_state
movsd PDH_res(%rip), %xmm0
movq PDH_acnt(%rip), %rdx
movq d_atom_list(%rip), %rsi
movq d_gpu_histogram(%rip), %rdi
call _Z53__device_stub__Z10PDH_kernelP10hist_entryP8atomdescxdP10hist_entryP8atomdescxd
jmp .L48
.cfi_endproc
.LFE2062:
.size main, .-main
.globl _Z54__device_stub__Z11PDH_kernel2P10hist_entryP8atomdescxdP10hist_entryP8atomdescxd
.type _Z54__device_stub__Z11PDH_kernel2P10hist_entryP8atomdescxdP10hist_entryP8atomdescxd, @function
_Z54__device_stub__Z11PDH_kernel2P10hist_entryP8atomdescxdP10hist_entryP8atomdescxd:
.LFB2089:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movsd %xmm0, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L59
.L55:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L60
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L59:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z11PDH_kernel2P10hist_entryP8atomdescxd(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L55
.L60:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2089:
.size _Z54__device_stub__Z11PDH_kernel2P10hist_entryP8atomdescxdP10hist_entryP8atomdescxd, .-_Z54__device_stub__Z11PDH_kernel2P10hist_entryP8atomdescxdP10hist_entryP8atomdescxd
.globl _Z11PDH_kernel2P10hist_entryP8atomdescxd
.type _Z11PDH_kernel2P10hist_entryP8atomdescxd, @function
_Z11PDH_kernel2P10hist_entryP8atomdescxd:
.LFB2090:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z54__device_stub__Z11PDH_kernel2P10hist_entryP8atomdescxdP10hist_entryP8atomdescxd
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2090:
.size _Z11PDH_kernel2P10hist_entryP8atomdescxd, .-_Z11PDH_kernel2P10hist_entryP8atomdescxd
.section .rodata.str1.8
.align 8
.LC18:
.string "_Z11PDH_kernel2P10hist_entryP8atomdescxd"
.align 8
.LC19:
.string "_Z10PDH_kernelP10hist_entryP8atomdescxd"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2092:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC18(%rip), %rdx
movq %rdx, %rcx
leaq _Z11PDH_kernel2P10hist_entryP8atomdescxd(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC19(%rip), %rdx
movq %rdx, %rcx
leaq _Z10PDH_kernelP10hist_entryP8atomdescxd(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2092:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl endTime
.bss
.align 16
.type endTime, @object
.size endTime, 16
endTime:
.zero 16
.globl startTime
.align 16
.type startTime, @object
.size startTime, 16
startTime:
.zero 16
.globl Idunno
.align 8
.type Idunno, @object
.size Idunno, 8
Idunno:
.zero 8
.globl d_atom_list
.align 8
.type d_atom_list, @object
.size d_atom_list, 8
d_atom_list:
.zero 8
.globl atom_list
.align 8
.type atom_list, @object
.size atom_list, 8
atom_list:
.zero 8
.globl PDH_res
.align 8
.type PDH_res, @object
.size PDH_res, 8
PDH_res:
.zero 8
.globl num_buckets
.align 4
.type num_buckets, @object
.size num_buckets, 4
num_buckets:
.zero 4
.globl PDH_acnt
.align 8
.type PDH_acnt, @object
.size PDH_acnt, 8
PDH_acnt:
.zero 8
.globl diff_histogram
.align 8
.type diff_histogram, @object
.size diff_histogram, 8
diff_histogram:
.zero 8
.globl d_gpu_histogram
.align 8
.type d_gpu_histogram, @object
.size d_gpu_histogram, 8
d_gpu_histogram:
.zero 8
.globl h_gpu_histogram
.align 8
.type h_gpu_histogram, @object
.size h_gpu_histogram, 8
h_gpu_histogram:
.zero 8
.globl histogram
.align 8
.type histogram, @object
.size histogram, 8
histogram:
.zero 8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC1:
.long 0
.long 1093567616
.align 8
.LC7:
.long 0
.long 1088648064
.align 8
.LC8:
.long -4194304
.long 1105199103
.align 8
.LC9:
.long 0
.long 1087796736
.align 8
.LC12:
.long 0
.long 1064304640
.align 8
.LC13:
.long 0
.long 1127219200
.align 8
.LC15:
.long 0
.long 1072693248
.align 8
.LC17:
.long -1
.long 2147483647
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /* ==================================================================
Programmer: Yicheng Tu (ytu@cse.usf.edu)
The basic SDH algorithm implementation for 3D data
To compile: nvcc SDH.c -o SDH in the C4 lab machines
==================================================================
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <cuda.h>
#define BOX_SIZE 23000 /* size of the data box on one dimension */
/* descriptors for single atom in the tree */
typedef struct atomdesc {
double x_pos;
double y_pos;
double z_pos;
} atom;
typedef struct hist_entry{
//float min;
//float max;
long long d_cnt; /* need a long long type as the count might be huge */
} bucket;
bucket * histogram; /* list of all buckets in the histogram */
bucket * h_gpu_histogram;
bucket * d_gpu_histogram;
bucket * diff_histogram;
long long PDH_acnt; /* total number of data points */
int num_buckets; /* total number of buckets in the histogram */
double PDH_res; /* value of w */
atom * atom_list; /* list of all data points */
atom * d_atom_list;
/* These are for an old way of tracking time */
struct timezone Idunno;
struct timeval startTime, endTime;
/*
distance of two points in the atom_list
*/
double p2p_distance(int ind1, int ind2) {
double x1 = atom_list[ind1].x_pos;
double x2 = atom_list[ind2].x_pos;
double y1 = atom_list[ind1].y_pos;
double y2 = atom_list[ind2].y_pos;
double z1 = atom_list[ind1].z_pos;
double z2 = atom_list[ind2].z_pos;
return sqrt((x1 - x2)*(x1-x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2));
}
//get cuda error
// void cuda_err(cudaError_t e, char in[])
// {
// if (e != cudaSuccess){
// printf("CUDA Error: %s, %s \n", in, cudaGetErrorString(e));
// exit(EXIT_FAILURE);
// }
// }
/*
brute-force SDH solution in a single CPU thread
*/
int PDH_baseline() {
int i, j, h_pos;
double dist;
for(i = 0; i < PDH_acnt; i++) {
for(j = i+1; j < PDH_acnt; j++) {
dist = p2p_distance(i,j);
h_pos = (int) (dist / PDH_res);
histogram[h_pos].d_cnt++;
}
}
return 0;
}
/*
SDH kernel - a really crappy one
*/
__global__ void PDH_kernel(bucket* d_histogram, atom* d_atom_list, long long acnt, double res)
{
int id = blockIdx.x*blockDim.x + threadIdx.x;
int j, h_pos;
double dist;
double x1;
double x2;
double y1;
double y2;
double z1;
double z2;
if(id < acnt)
for(j = id+1; j < acnt; j++)
{
x1 = d_atom_list[id].x_pos;
x2 = d_atom_list[j].x_pos;
y1 = d_atom_list[id].y_pos;
y2 = d_atom_list[j].y_pos;
z1 = d_atom_list[id].z_pos;
z2 = d_atom_list[j].z_pos;
dist = sqrt((x1 - x2)*(x1-x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2));
h_pos = (int) (dist / res);
// __syncthreads();
// d_histogram[h_pos].d_cnt += 1; //very odd that this doesnt work but atomicAdd does... I wonder why
//long story short... it's basically because having synchronized threads
//at this point doesnt prevent race condiions on the increment itself
// __syncthreads();
atomicAdd((unsigned long long int*)&d_histogram[h_pos].d_cnt,1);
}
}
/*
SDH kernel 2.0 - now the goal is to make a better kernel, that is at least 10x faster (at first)
*/
__global__ void PDH_kernel2(bucket* d_histogram, atom* d_atom_list, long long acnt, double res)
{
}
/*
set a checkpoint and show the (natural) running time in seconds
*/
double report_running_time() {
long sec_diff, usec_diff;
gettimeofday(&endTime, &Idunno);
sec_diff = endTime.tv_sec - startTime.tv_sec;
usec_diff= endTime.tv_usec-startTime.tv_usec;
if(usec_diff < 0) {
sec_diff --;
usec_diff += 1000000;
}
printf("Running time for CPU version: %ld.%06ld\n", sec_diff, usec_diff);
return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
double report_running_time_GPU() {
long sec_diff, usec_diff;
gettimeofday(&endTime, &Idunno);
sec_diff = endTime.tv_sec - startTime.tv_sec;
usec_diff= endTime.tv_usec-startTime.tv_usec;
if(usec_diff < 0) {
sec_diff --;
usec_diff += 1000000;
}
printf("Running time for GPU version: %ld.%06ld\n", sec_diff, usec_diff);
return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
/*
print the counts in all buckets of the histogram
*/
void output_histogram(bucket* histogram){
int i;
long long total_cnt = 0;
for(i=0; i< num_buckets; i++) {
if(i%5 == 0) /* we print 5 buckets in a row */
printf("\n%02d: ", i);
printf("%15lld ", histogram[i].d_cnt);
total_cnt += histogram[i].d_cnt;
/* we also want to make sure the total distance count is correct */
if(i == num_buckets - 1)
printf("\n T:%lld \n", total_cnt);
else printf("| ");
}
}
int main(int argc, char **argv)
{
int i;
PDH_acnt = atoi(argv[1]);
PDH_res = atof(argv[2]);
//printf("args are %d and %f\n", PDH_acnt, PDH_res);
num_buckets = (int)(BOX_SIZE * 1.732 / PDH_res) + 1;
histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
atom_list = (atom *)malloc(sizeof(atom)*PDH_acnt);
srand(1);
/* generate data following a uniform distribution */
for(i = 0; i < PDH_acnt; i++) {
atom_list[i].x_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
atom_list[i].y_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
atom_list[i].z_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
}
/* start counting time */
printf("Starting CPU...\n");
gettimeofday(&startTime, &Idunno);
/* call CPU single thread version to compute the histogram */
PDH_baseline();
/* check the total running time */
report_running_time();
/* print out the histogram */
output_histogram(histogram);
printf("Starting GPU...\n");
//cudaDeviceReset();
//gpu code--------------------------------------------------------------------------------
h_gpu_histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
//copy the atomlist over from host to device
cudaMalloc((void**)&d_atom_list, sizeof(atom)*PDH_acnt);
cudaMemcpy(d_atom_list, atom_list, sizeof(atom)*PDH_acnt, cudaMemcpyHostToDevice);
//allocate the histogram data on the device
cudaMalloc((void**)&d_gpu_histogram, sizeof(bucket)*num_buckets);
cudaMemcpy(d_gpu_histogram, h_gpu_histogram, sizeof(bucket)*num_buckets,cudaMemcpyHostToDevice);
//start the timer
gettimeofday(&startTime, &Idunno);
//run the kernel
PDH_kernel<<<ceil(PDH_acnt/256.0), 256>>>(d_gpu_histogram, d_atom_list, PDH_acnt, PDH_res);
//copy the histogram results back from gpu over to cpu
cudaMemcpy(h_gpu_histogram, d_gpu_histogram, sizeof(bucket)*num_buckets, cudaMemcpyDeviceToHost);
//check total running time
report_running_time_GPU();
//print out the resulting histogram from the GPU
output_histogram(h_gpu_histogram);
//difference calculation--------------------------------------------------------------------------------
printf("Difference: \n");
diff_histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
int bi;
for(bi = 0; bi < num_buckets; bi++)
{
diff_histogram[bi].d_cnt = histogram[bi].d_cnt - h_gpu_histogram[bi].d_cnt;
}
output_histogram(diff_histogram);
cudaFree(d_gpu_histogram);
cudaFree(d_atom_list);
free(histogram);
free(atom_list);
free(h_gpu_histogram);
free(diff_histogram);
return 0;
} | /* ==================================================================
Programmer: Yicheng Tu (ytu@cse.usf.edu)
The basic SDH algorithm implementation for 3D data
To compile: nvcc SDH.c -o SDH in the C4 lab machines
==================================================================
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#define BOX_SIZE 23000 /* size of the data box on one dimension */
/* descriptors for single atom in the tree */
typedef struct atomdesc {
double x_pos;
double y_pos;
double z_pos;
} atom;
typedef struct hist_entry{
//float min;
//float max;
long long d_cnt; /* need a long long type as the count might be huge */
} bucket;
bucket * histogram; /* list of all buckets in the histogram */
bucket * h_gpu_histogram;
bucket * d_gpu_histogram;
bucket * diff_histogram;
long long PDH_acnt; /* total number of data points */
int num_buckets; /* total number of buckets in the histogram */
double PDH_res; /* value of w */
atom * atom_list; /* list of all data points */
atom * d_atom_list;
/* These are for an old way of tracking time */
struct timezone Idunno;
struct timeval startTime, endTime;
/*
distance of two points in the atom_list
*/
double p2p_distance(int ind1, int ind2) {
double x1 = atom_list[ind1].x_pos;
double x2 = atom_list[ind2].x_pos;
double y1 = atom_list[ind1].y_pos;
double y2 = atom_list[ind2].y_pos;
double z1 = atom_list[ind1].z_pos;
double z2 = atom_list[ind2].z_pos;
return sqrt((x1 - x2)*(x1-x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2));
}
//get cuda error
// void cuda_err(cudaError_t e, char in[])
// {
// if (e != cudaSuccess){
// printf("CUDA Error: %s, %s \n", in, cudaGetErrorString(e));
// exit(EXIT_FAILURE);
// }
// }
/*
brute-force SDH solution in a single CPU thread
*/
int PDH_baseline() {
int i, j, h_pos;
double dist;
for(i = 0; i < PDH_acnt; i++) {
for(j = i+1; j < PDH_acnt; j++) {
dist = p2p_distance(i,j);
h_pos = (int) (dist / PDH_res);
histogram[h_pos].d_cnt++;
}
}
return 0;
}
/*
SDH kernel - a really crappy one
*/
__global__ void PDH_kernel(bucket* d_histogram, atom* d_atom_list, long long acnt, double res)
{
int id = blockIdx.x*blockDim.x + threadIdx.x;
int j, h_pos;
double dist;
double x1;
double x2;
double y1;
double y2;
double z1;
double z2;
if(id < acnt)
for(j = id+1; j < acnt; j++)
{
x1 = d_atom_list[id].x_pos;
x2 = d_atom_list[j].x_pos;
y1 = d_atom_list[id].y_pos;
y2 = d_atom_list[j].y_pos;
z1 = d_atom_list[id].z_pos;
z2 = d_atom_list[j].z_pos;
dist = sqrt((x1 - x2)*(x1-x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2));
h_pos = (int) (dist / res);
// __syncthreads();
// d_histogram[h_pos].d_cnt += 1; //very odd that this doesnt work but atomicAdd does... I wonder why
//long story short... it's basically because having synchronized threads
//at this point doesnt prevent race condiions on the increment itself
// __syncthreads();
atomicAdd((unsigned long long int*)&d_histogram[h_pos].d_cnt,1);
}
}
/*
SDH kernel 2.0 - now the goal is to make a better kernel, that is at least 10x faster (at first)
*/
__global__ void PDH_kernel2(bucket* d_histogram, atom* d_atom_list, long long acnt, double res)
{
}
/*
set a checkpoint and show the (natural) running time in seconds
*/
double report_running_time() {
long sec_diff, usec_diff;
gettimeofday(&endTime, &Idunno);
sec_diff = endTime.tv_sec - startTime.tv_sec;
usec_diff= endTime.tv_usec-startTime.tv_usec;
if(usec_diff < 0) {
sec_diff --;
usec_diff += 1000000;
}
printf("Running time for CPU version: %ld.%06ld\n", sec_diff, usec_diff);
return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
double report_running_time_GPU() {
long sec_diff, usec_diff;
gettimeofday(&endTime, &Idunno);
sec_diff = endTime.tv_sec - startTime.tv_sec;
usec_diff= endTime.tv_usec-startTime.tv_usec;
if(usec_diff < 0) {
sec_diff --;
usec_diff += 1000000;
}
printf("Running time for GPU version: %ld.%06ld\n", sec_diff, usec_diff);
return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
/*
print the counts in all buckets of the histogram
*/
void output_histogram(bucket* histogram){
int i;
long long total_cnt = 0;
for(i=0; i< num_buckets; i++) {
if(i%5 == 0) /* we print 5 buckets in a row */
printf("\n%02d: ", i);
printf("%15lld ", histogram[i].d_cnt);
total_cnt += histogram[i].d_cnt;
/* we also want to make sure the total distance count is correct */
if(i == num_buckets - 1)
printf("\n T:%lld \n", total_cnt);
else printf("| ");
}
}
int main(int argc, char **argv)
{
int i;
PDH_acnt = atoi(argv[1]);
PDH_res = atof(argv[2]);
//printf("args are %d and %f\n", PDH_acnt, PDH_res);
num_buckets = (int)(BOX_SIZE * 1.732 / PDH_res) + 1;
histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
atom_list = (atom *)malloc(sizeof(atom)*PDH_acnt);
srand(1);
/* generate data following a uniform distribution */
for(i = 0; i < PDH_acnt; i++) {
atom_list[i].x_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
atom_list[i].y_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
atom_list[i].z_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
}
/* start counting time */
printf("Starting CPU...\n");
gettimeofday(&startTime, &Idunno);
/* call CPU single thread version to compute the histogram */
PDH_baseline();
/* check the total running time */
report_running_time();
/* print out the histogram */
output_histogram(histogram);
printf("Starting GPU...\n");
//cudaDeviceReset();
//gpu code--------------------------------------------------------------------------------
h_gpu_histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
//copy the atomlist over from host to device
hipMalloc((void**)&d_atom_list, sizeof(atom)*PDH_acnt);
hipMemcpy(d_atom_list, atom_list, sizeof(atom)*PDH_acnt, hipMemcpyHostToDevice);
//allocate the histogram data on the device
hipMalloc((void**)&d_gpu_histogram, sizeof(bucket)*num_buckets);
hipMemcpy(d_gpu_histogram, h_gpu_histogram, sizeof(bucket)*num_buckets,hipMemcpyHostToDevice);
//start the timer
gettimeofday(&startTime, &Idunno);
//run the kernel
PDH_kernel<<<ceil(PDH_acnt/256.0), 256>>>(d_gpu_histogram, d_atom_list, PDH_acnt, PDH_res);
//copy the histogram results back from gpu over to cpu
hipMemcpy(h_gpu_histogram, d_gpu_histogram, sizeof(bucket)*num_buckets, hipMemcpyDeviceToHost);
//check total running time
report_running_time_GPU();
//print out the resulting histogram from the GPU
output_histogram(h_gpu_histogram);
//difference calculation--------------------------------------------------------------------------------
printf("Difference: \n");
diff_histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
int bi;
for(bi = 0; bi < num_buckets; bi++)
{
diff_histogram[bi].d_cnt = histogram[bi].d_cnt - h_gpu_histogram[bi].d_cnt;
}
output_histogram(diff_histogram);
hipFree(d_gpu_histogram);
hipFree(d_atom_list);
free(histogram);
free(atom_list);
free(h_gpu_histogram);
free(diff_histogram);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /* ==================================================================
Programmer: Yicheng Tu (ytu@cse.usf.edu)
The basic SDH algorithm implementation for 3D data
To compile: nvcc SDH.c -o SDH in the C4 lab machines
==================================================================
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#define BOX_SIZE 23000 /* size of the data box on one dimension */
/* descriptors for single atom in the tree */
typedef struct atomdesc {
double x_pos;
double y_pos;
double z_pos;
} atom;
typedef struct hist_entry{
//float min;
//float max;
long long d_cnt; /* need a long long type as the count might be huge */
} bucket;
bucket * histogram; /* list of all buckets in the histogram */
bucket * h_gpu_histogram;
bucket * d_gpu_histogram;
bucket * diff_histogram;
long long PDH_acnt; /* total number of data points */
int num_buckets; /* total number of buckets in the histogram */
double PDH_res; /* value of w */
atom * atom_list; /* list of all data points */
atom * d_atom_list;
/* These are for an old way of tracking time */
struct timezone Idunno;
struct timeval startTime, endTime;
/*
distance of two points in the atom_list
*/
double p2p_distance(int ind1, int ind2) {
double x1 = atom_list[ind1].x_pos;
double x2 = atom_list[ind2].x_pos;
double y1 = atom_list[ind1].y_pos;
double y2 = atom_list[ind2].y_pos;
double z1 = atom_list[ind1].z_pos;
double z2 = atom_list[ind2].z_pos;
return sqrt((x1 - x2)*(x1-x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2));
}
//get cuda error
// void cuda_err(cudaError_t e, char in[])
// {
// if (e != cudaSuccess){
// printf("CUDA Error: %s, %s \n", in, cudaGetErrorString(e));
// exit(EXIT_FAILURE);
// }
// }
/*
brute-force SDH solution in a single CPU thread
*/
int PDH_baseline() {
int i, j, h_pos;
double dist;
for(i = 0; i < PDH_acnt; i++) {
for(j = i+1; j < PDH_acnt; j++) {
dist = p2p_distance(i,j);
h_pos = (int) (dist / PDH_res);
histogram[h_pos].d_cnt++;
}
}
return 0;
}
/*
SDH kernel - a really crappy one
*/
__global__ void PDH_kernel(bucket* d_histogram, atom* d_atom_list, long long acnt, double res)
{
int id = blockIdx.x*blockDim.x + threadIdx.x;
int j, h_pos;
double dist;
double x1;
double x2;
double y1;
double y2;
double z1;
double z2;
if(id < acnt)
for(j = id+1; j < acnt; j++)
{
x1 = d_atom_list[id].x_pos;
x2 = d_atom_list[j].x_pos;
y1 = d_atom_list[id].y_pos;
y2 = d_atom_list[j].y_pos;
z1 = d_atom_list[id].z_pos;
z2 = d_atom_list[j].z_pos;
dist = sqrt((x1 - x2)*(x1-x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2));
h_pos = (int) (dist / res);
// __syncthreads();
// d_histogram[h_pos].d_cnt += 1; //very odd that this doesnt work but atomicAdd does... I wonder why
//long story short... it's basically because having synchronized threads
//at this point doesnt prevent race condiions on the increment itself
// __syncthreads();
atomicAdd((unsigned long long int*)&d_histogram[h_pos].d_cnt,1);
}
}
/*
SDH kernel 2.0 - now the goal is to make a better kernel, that is at least 10x faster (at first)
*/
__global__ void PDH_kernel2(bucket* d_histogram, atom* d_atom_list, long long acnt, double res)
{
}
/*
set a checkpoint and show the (natural) running time in seconds
*/
double report_running_time() {
long sec_diff, usec_diff;
gettimeofday(&endTime, &Idunno);
sec_diff = endTime.tv_sec - startTime.tv_sec;
usec_diff= endTime.tv_usec-startTime.tv_usec;
if(usec_diff < 0) {
sec_diff --;
usec_diff += 1000000;
}
printf("Running time for CPU version: %ld.%06ld\n", sec_diff, usec_diff);
return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
double report_running_time_GPU() {
long sec_diff, usec_diff;
gettimeofday(&endTime, &Idunno);
sec_diff = endTime.tv_sec - startTime.tv_sec;
usec_diff= endTime.tv_usec-startTime.tv_usec;
if(usec_diff < 0) {
sec_diff --;
usec_diff += 1000000;
}
printf("Running time for GPU version: %ld.%06ld\n", sec_diff, usec_diff);
return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
/*
print the counts in all buckets of the histogram
*/
void output_histogram(bucket* histogram){
int i;
long long total_cnt = 0;
for(i=0; i< num_buckets; i++) {
if(i%5 == 0) /* we print 5 buckets in a row */
printf("\n%02d: ", i);
printf("%15lld ", histogram[i].d_cnt);
total_cnt += histogram[i].d_cnt;
/* we also want to make sure the total distance count is correct */
if(i == num_buckets - 1)
printf("\n T:%lld \n", total_cnt);
else printf("| ");
}
}
int main(int argc, char **argv)
{
int i;
PDH_acnt = atoi(argv[1]);
PDH_res = atof(argv[2]);
//printf("args are %d and %f\n", PDH_acnt, PDH_res);
num_buckets = (int)(BOX_SIZE * 1.732 / PDH_res) + 1;
histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
atom_list = (atom *)malloc(sizeof(atom)*PDH_acnt);
srand(1);
/* generate data following a uniform distribution */
for(i = 0; i < PDH_acnt; i++) {
atom_list[i].x_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
atom_list[i].y_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
atom_list[i].z_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
}
/* start counting time */
printf("Starting CPU...\n");
gettimeofday(&startTime, &Idunno);
/* call CPU single thread version to compute the histogram */
PDH_baseline();
/* check the total running time */
report_running_time();
/* print out the histogram */
output_histogram(histogram);
printf("Starting GPU...\n");
//cudaDeviceReset();
//gpu code--------------------------------------------------------------------------------
h_gpu_histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
//copy the atomlist over from host to device
hipMalloc((void**)&d_atom_list, sizeof(atom)*PDH_acnt);
hipMemcpy(d_atom_list, atom_list, sizeof(atom)*PDH_acnt, hipMemcpyHostToDevice);
//allocate the histogram data on the device
hipMalloc((void**)&d_gpu_histogram, sizeof(bucket)*num_buckets);
hipMemcpy(d_gpu_histogram, h_gpu_histogram, sizeof(bucket)*num_buckets,hipMemcpyHostToDevice);
//start the timer
gettimeofday(&startTime, &Idunno);
//run the kernel
PDH_kernel<<<ceil(PDH_acnt/256.0), 256>>>(d_gpu_histogram, d_atom_list, PDH_acnt, PDH_res);
//copy the histogram results back from gpu over to cpu
hipMemcpy(h_gpu_histogram, d_gpu_histogram, sizeof(bucket)*num_buckets, hipMemcpyDeviceToHost);
//check total running time
report_running_time_GPU();
//print out the resulting histogram from the GPU
output_histogram(h_gpu_histogram);
//difference calculation--------------------------------------------------------------------------------
printf("Difference: \n");
diff_histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
int bi;
for(bi = 0; bi < num_buckets; bi++)
{
diff_histogram[bi].d_cnt = histogram[bi].d_cnt - h_gpu_histogram[bi].d_cnt;
}
output_histogram(diff_histogram);
hipFree(d_gpu_histogram);
hipFree(d_atom_list);
free(histogram);
free(atom_list);
free(h_gpu_histogram);
free(diff_histogram);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10PDH_kernelP10hist_entryP8atomdescxd
.globl _Z10PDH_kernelP10hist_entryP8atomdescxd
.p2align 8
.type _Z10PDH_kernelP10hist_entryP8atomdescxd,@function
_Z10PDH_kernelP10hist_entryP8atomdescxd:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x2c
s_load_b64 s[2:3], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
s_mul_i32 s15, s15, s4
s_mov_b32 s4, exec_lo
v_add_nc_u32_e32 v2, s15, v0
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i64_e64 s[2:3], v[2:3]
s_cbranch_execz .LBB0_4
v_add_nc_u32_e32 v4, 1, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v5, 31, v4
v_cmp_gt_i64_e32 vcc_lo, s[2:3], v[4:5]
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_4
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x18
v_add3_u32 v3, v0, s15, 2
s_mov_b32 s8, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
v_ashrrev_i32_e32 v6, 31, v3
s_waitcnt lgkmcnt(0)
v_mad_i64_i32 v[0:1], null, v2, 24, s[6:7]
v_add_co_u32 v2, vcc_lo, v3, -1
v_add_co_ci_u32_e32 v3, vcc_lo, -1, v6, vcc_lo
.LBB0_3:
v_mad_u64_u32 v[8:9], null, v4, 24, s[6:7]
s_delay_alu instid0(VALU_DEP_4)
v_add_co_u32 v10, vcc_lo, v0, 16
v_add_co_ci_u32_e32 v11, vcc_lo, 0, v1, vcc_lo
s_clause 0x1
global_load_b64 v[12:13], v[0:1], off
global_load_b64 v[14:15], v[10:11], off offset:-8
v_mov_b32_e32 v4, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[6:7], null, v5, 24, v[4:5]
v_mov_b32_e32 v9, v6
s_clause 0x2
global_load_b128 v[4:7], v[8:9], off
global_load_b64 v[10:11], v[10:11], off
global_load_b64 v[8:9], v[8:9], off offset:16
s_waitcnt vmcnt(2)
v_add_f64 v[6:7], v[14:15], -v[6:7]
v_add_f64 v[4:5], v[12:13], -v[4:5]
s_waitcnt vmcnt(0)
v_add_f64 v[8:9], v[10:11], -v[8:9]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[6:7], v[6:7], v[6:7]
v_fma_f64 v[4:5], v[4:5], v[4:5], v[6:7]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[4:5], v[8:9], v[8:9], v[4:5]
v_cmp_gt_f64_e32 vcc_lo, 0x10000000, v[4:5]
v_cndmask_b32_e64 v6, 0, 1, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b32_e32 v6, 8, v6
v_ldexp_f64 v[4:5], v[4:5], v6
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_rsq_f64_e32 v[6:7], v[4:5]
s_waitcnt_depctr 0xfff
v_mul_f64 v[8:9], v[4:5], v[6:7]
v_mul_f64 v[6:7], v[6:7], 0.5
v_fma_f64 v[10:11], -v[6:7], v[8:9], 0.5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_fma_f64 v[8:9], v[8:9], v[10:11], v[8:9]
v_fma_f64 v[6:7], v[6:7], v[10:11], v[6:7]
v_fma_f64 v[10:11], -v[8:9], v[8:9], v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[8:9], v[10:11], v[6:7], v[8:9]
v_fma_f64 v[10:11], -v[8:9], v[8:9], v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_fma_f64 v[6:7], v[10:11], v[6:7], v[8:9]
v_cndmask_b32_e64 v8, 0, 0xffffff80, vcc_lo
v_cmp_class_f64_e64 vcc_lo, v[4:5], 0x260
v_ldexp_f64 v[6:7], v[6:7], v8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_cndmask_b32 v5, v7, v5 :: v_dual_cndmask_b32 v4, v6, v4
v_div_scale_f64 v[6:7], null, s[0:1], s[0:1], v[4:5]
v_div_scale_f64 v[12:13], vcc_lo, v[4:5], s[0:1], v[4:5]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_f64_e32 v[8:9], v[6:7]
s_waitcnt_depctr 0xfff
v_fma_f64 v[10:11], -v[6:7], v[8:9], 1.0
v_fma_f64 v[8:9], v[8:9], v[10:11], v[8:9]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[10:11], -v[6:7], v[8:9], 1.0
v_fma_f64 v[8:9], v[8:9], v[10:11], v[8:9]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[10:11], v[12:13], v[8:9]
v_fma_f64 v[6:7], -v[6:7], v[10:11], v[12:13]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fmas_f64 v[6:7], v[6:7], v[8:9], v[10:11]
v_div_fixup_f64 v[4:5], v[6:7], s[0:1], v[4:5]
v_mov_b32_e32 v6, 1
v_mov_b32_e32 v7, 0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_i32_f64_e32 v4, v[4:5]
v_ashrrev_i32_e32 v5, 31, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[4:5], 3, v[4:5]
v_add_co_u32 v4, vcc_lo, s4, v4
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v5, vcc_lo
v_add_co_u32 v2, vcc_lo, v2, 1
v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
global_atomic_add_u64 v[4:5], v[6:7], off
v_mov_b32_e32 v4, v2
v_ashrrev_i32_e32 v5, 31, v2
v_cmp_le_i64_e32 vcc_lo, s[2:3], v[2:3]
s_or_b32 s8, vcc_lo, s8
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s8
s_cbranch_execnz .LBB0_3
.LBB0_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10PDH_kernelP10hist_entryP8atomdescxd
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 16
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10PDH_kernelP10hist_entryP8atomdescxd, .Lfunc_end0-_Z10PDH_kernelP10hist_entryP8atomdescxd
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z11PDH_kernel2P10hist_entryP8atomdescxd
.globl _Z11PDH_kernel2P10hist_entryP8atomdescxd
.p2align 8
.type _Z11PDH_kernel2P10hist_entryP8atomdescxd,@function
_Z11PDH_kernel2P10hist_entryP8atomdescxd:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z11PDH_kernel2P10hist_entryP8atomdescxd
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 32
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z11PDH_kernel2P10hist_entryP8atomdescxd, .Lfunc_end1-_Z11PDH_kernel2P10hist_entryP8atomdescxd
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 8
.value_kind: by_value
- .offset: 24
.size: 8
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10PDH_kernelP10hist_entryP8atomdescxd
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10PDH_kernelP10hist_entryP8atomdescxd.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 16
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 8
.value_kind: by_value
- .offset: 24
.size: 8
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 32
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z11PDH_kernel2P10hist_entryP8atomdescxd
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z11PDH_kernel2P10hist_entryP8atomdescxd.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /* ==================================================================
Programmer: Yicheng Tu (ytu@cse.usf.edu)
The basic SDH algorithm implementation for 3D data
To compile: nvcc SDH.c -o SDH in the C4 lab machines
==================================================================
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#define BOX_SIZE 23000 /* size of the data box on one dimension */
/* descriptors for single atom in the tree */
typedef struct atomdesc {
double x_pos;
double y_pos;
double z_pos;
} atom;
typedef struct hist_entry{
//float min;
//float max;
long long d_cnt; /* need a long long type as the count might be huge */
} bucket;
bucket * histogram; /* list of all buckets in the histogram */
bucket * h_gpu_histogram;
bucket * d_gpu_histogram;
bucket * diff_histogram;
long long PDH_acnt; /* total number of data points */
int num_buckets; /* total number of buckets in the histogram */
double PDH_res; /* value of w */
atom * atom_list; /* list of all data points */
atom * d_atom_list;
/* These are for an old way of tracking time */
struct timezone Idunno;
struct timeval startTime, endTime;
/*
distance of two points in the atom_list
*/
double p2p_distance(int ind1, int ind2) {
double x1 = atom_list[ind1].x_pos;
double x2 = atom_list[ind2].x_pos;
double y1 = atom_list[ind1].y_pos;
double y2 = atom_list[ind2].y_pos;
double z1 = atom_list[ind1].z_pos;
double z2 = atom_list[ind2].z_pos;
return sqrt((x1 - x2)*(x1-x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2));
}
//get cuda error
// void cuda_err(cudaError_t e, char in[])
// {
// if (e != cudaSuccess){
// printf("CUDA Error: %s, %s \n", in, cudaGetErrorString(e));
// exit(EXIT_FAILURE);
// }
// }
/*
brute-force SDH solution in a single CPU thread
*/
int PDH_baseline() {
int i, j, h_pos;
double dist;
for(i = 0; i < PDH_acnt; i++) {
for(j = i+1; j < PDH_acnt; j++) {
dist = p2p_distance(i,j);
h_pos = (int) (dist / PDH_res);
histogram[h_pos].d_cnt++;
}
}
return 0;
}
/*
SDH kernel - a really crappy one
*/
__global__ void PDH_kernel(bucket* d_histogram, atom* d_atom_list, long long acnt, double res)
{
int id = blockIdx.x*blockDim.x + threadIdx.x;
int j, h_pos;
double dist;
double x1;
double x2;
double y1;
double y2;
double z1;
double z2;
if(id < acnt)
for(j = id+1; j < acnt; j++)
{
x1 = d_atom_list[id].x_pos;
x2 = d_atom_list[j].x_pos;
y1 = d_atom_list[id].y_pos;
y2 = d_atom_list[j].y_pos;
z1 = d_atom_list[id].z_pos;
z2 = d_atom_list[j].z_pos;
dist = sqrt((x1 - x2)*(x1-x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2));
h_pos = (int) (dist / res);
// __syncthreads();
// d_histogram[h_pos].d_cnt += 1; //very odd that this doesnt work but atomicAdd does... I wonder why
//long story short... it's basically because having synchronized threads
//at this point doesnt prevent race condiions on the increment itself
// __syncthreads();
atomicAdd((unsigned long long int*)&d_histogram[h_pos].d_cnt,1);
}
}
/*
SDH kernel 2.0 - now the goal is to make a better kernel, that is at least 10x faster (at first)
*/
__global__ void PDH_kernel2(bucket* d_histogram, atom* d_atom_list, long long acnt, double res)
{
}
/*
set a checkpoint and show the (natural) running time in seconds
*/
double report_running_time() {
long sec_diff, usec_diff;
gettimeofday(&endTime, &Idunno);
sec_diff = endTime.tv_sec - startTime.tv_sec;
usec_diff= endTime.tv_usec-startTime.tv_usec;
if(usec_diff < 0) {
sec_diff --;
usec_diff += 1000000;
}
printf("Running time for CPU version: %ld.%06ld\n", sec_diff, usec_diff);
return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
double report_running_time_GPU() {
long sec_diff, usec_diff;
gettimeofday(&endTime, &Idunno);
sec_diff = endTime.tv_sec - startTime.tv_sec;
usec_diff= endTime.tv_usec-startTime.tv_usec;
if(usec_diff < 0) {
sec_diff --;
usec_diff += 1000000;
}
printf("Running time for GPU version: %ld.%06ld\n", sec_diff, usec_diff);
return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
/*
print the counts in all buckets of the histogram
*/
void output_histogram(bucket* histogram){
int i;
long long total_cnt = 0;
for(i=0; i< num_buckets; i++) {
if(i%5 == 0) /* we print 5 buckets in a row */
printf("\n%02d: ", i);
printf("%15lld ", histogram[i].d_cnt);
total_cnt += histogram[i].d_cnt;
/* we also want to make sure the total distance count is correct */
if(i == num_buckets - 1)
printf("\n T:%lld \n", total_cnt);
else printf("| ");
}
}
int main(int argc, char **argv)
{
int i;
PDH_acnt = atoi(argv[1]);
PDH_res = atof(argv[2]);
//printf("args are %d and %f\n", PDH_acnt, PDH_res);
num_buckets = (int)(BOX_SIZE * 1.732 / PDH_res) + 1;
histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
atom_list = (atom *)malloc(sizeof(atom)*PDH_acnt);
srand(1);
/* generate data following a uniform distribution */
for(i = 0; i < PDH_acnt; i++) {
atom_list[i].x_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
atom_list[i].y_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
atom_list[i].z_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
}
/* start counting time */
printf("Starting CPU...\n");
gettimeofday(&startTime, &Idunno);
/* call CPU single thread version to compute the histogram */
PDH_baseline();
/* check the total running time */
report_running_time();
/* print out the histogram */
output_histogram(histogram);
printf("Starting GPU...\n");
//cudaDeviceReset();
//gpu code--------------------------------------------------------------------------------
h_gpu_histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
//copy the atomlist over from host to device
hipMalloc((void**)&d_atom_list, sizeof(atom)*PDH_acnt);
hipMemcpy(d_atom_list, atom_list, sizeof(atom)*PDH_acnt, hipMemcpyHostToDevice);
//allocate the histogram data on the device
hipMalloc((void**)&d_gpu_histogram, sizeof(bucket)*num_buckets);
hipMemcpy(d_gpu_histogram, h_gpu_histogram, sizeof(bucket)*num_buckets,hipMemcpyHostToDevice);
//start the timer
gettimeofday(&startTime, &Idunno);
//run the kernel
PDH_kernel<<<ceil(PDH_acnt/256.0), 256>>>(d_gpu_histogram, d_atom_list, PDH_acnt, PDH_res);
//copy the histogram results back from gpu over to cpu
hipMemcpy(h_gpu_histogram, d_gpu_histogram, sizeof(bucket)*num_buckets, hipMemcpyDeviceToHost);
//check total running time
report_running_time_GPU();
//print out the resulting histogram from the GPU
output_histogram(h_gpu_histogram);
//difference calculation--------------------------------------------------------------------------------
printf("Difference: \n");
diff_histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
int bi;
for(bi = 0; bi < num_buckets; bi++)
{
diff_histogram[bi].d_cnt = histogram[bi].d_cnt - h_gpu_histogram[bi].d_cnt;
}
output_histogram(diff_histogram);
hipFree(d_gpu_histogram);
hipFree(d_atom_list);
free(histogram);
free(atom_list);
free(h_gpu_histogram);
free(diff_histogram);
return 0;
} | .text
.file "proj1-tanmay.hip"
.globl _Z12p2p_distanceii # -- Begin function _Z12p2p_distanceii
.p2align 4, 0x90
.type _Z12p2p_distanceii,@function
_Z12p2p_distanceii: # @_Z12p2p_distanceii
.cfi_startproc
# %bb.0:
movq atom_list(%rip), %rax
movslq %edi, %rcx
leaq (%rcx,%rcx,2), %rcx
movslq %esi, %rdx
leaq (%rdx,%rdx,2), %rdx
movsd (%rax,%rcx,8), %xmm1 # xmm1 = mem[0],zero
movsd 8(%rax,%rcx,8), %xmm2 # xmm2 = mem[0],zero
movsd 16(%rax,%rcx,8), %xmm0 # xmm0 = mem[0],zero
subsd (%rax,%rdx,8), %xmm1
mulsd %xmm1, %xmm1
subsd 8(%rax,%rdx,8), %xmm2
mulsd %xmm2, %xmm2
addsd %xmm1, %xmm2
subsd 16(%rax,%rdx,8), %xmm0
mulsd %xmm0, %xmm0
addsd %xmm2, %xmm0
xorpd %xmm1, %xmm1
ucomisd %xmm1, %xmm0
jb sqrt # TAILCALL
# %bb.1: # %.split
sqrtsd %xmm0, %xmm0
retq
.Lfunc_end0:
.size _Z12p2p_distanceii, .Lfunc_end0-_Z12p2p_distanceii
.cfi_endproc
# -- End function
.globl _Z12PDH_baselinev # -- Begin function _Z12PDH_baselinev
.p2align 4, 0x90
.type _Z12PDH_baselinev,@function
_Z12PDH_baselinev: # @_Z12PDH_baselinev
.cfi_startproc
# %bb.0:
cmpq $0, PDH_acnt(%rip)
jle .LBB1_10
# %bb.1: # %.lr.ph11.preheader
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
pushq %rax
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $1, %ebx
movl $40, %r14d
xorl %r15d, %r15d
xorpd %xmm3, %xmm3
jmp .LBB1_3
.p2align 4, 0x90
.LBB1_2: # %.loopexit
# in Loop: Header=BB1_3 Depth=1
incq %rbx
addq $24, %r14
cmpq %r15, PDH_acnt(%rip)
jle .LBB1_9
.LBB1_3: # %.lr.ph11
# =>This Loop Header: Depth=1
# Child Loop BB1_5 Depth 2
movq %r15, %rax
incq %r15
cmpq %r15, PDH_acnt(%rip)
jle .LBB1_2
# %bb.4: # %.lr.ph.preheader
# in Loop: Header=BB1_3 Depth=1
leaq (%rax,%rax,2), %r12
movq %r14, %r13
movq %rbx, %rbp
jmp .LBB1_5
.p2align 4, 0x90
.LBB1_7: # %call.sqrt
# in Loop: Header=BB1_5 Depth=2
callq sqrt
xorpd %xmm3, %xmm3
.LBB1_8: # %.lr.ph.split
# in Loop: Header=BB1_5 Depth=2
divsd PDH_res(%rip), %xmm0
cvttsd2si %xmm0, %eax
movq histogram(%rip), %rcx
cltq
incq (%rcx,%rax,8)
incq %rbp
addq $24, %r13
cmpq %rbp, PDH_acnt(%rip)
jle .LBB1_2
.LBB1_5: # %.lr.ph
# Parent Loop BB1_3 Depth=1
# => This Inner Loop Header: Depth=2
movq atom_list(%rip), %rax
movsd (%rax,%r12,8), %xmm1 # xmm1 = mem[0],zero
movsd 8(%rax,%r12,8), %xmm2 # xmm2 = mem[0],zero
subsd -16(%rax,%r13), %xmm1
movsd 16(%rax,%r12,8), %xmm0 # xmm0 = mem[0],zero
mulsd %xmm1, %xmm1
subsd -8(%rax,%r13), %xmm2
mulsd %xmm2, %xmm2
addsd %xmm1, %xmm2
subsd (%rax,%r13), %xmm0
mulsd %xmm0, %xmm0
addsd %xmm2, %xmm0
ucomisd %xmm3, %xmm0
jb .LBB1_7
# %bb.6: # in Loop: Header=BB1_5 Depth=2
sqrtsd %xmm0, %xmm0
jmp .LBB1_8
.LBB1_9:
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r12
.cfi_restore %r13
.cfi_restore %r14
.cfi_restore %r15
.cfi_restore %rbp
.LBB1_10: # %._crit_edge
xorl %eax, %eax
retq
.Lfunc_end1:
.size _Z12PDH_baselinev, .Lfunc_end1-_Z12PDH_baselinev
.cfi_endproc
# -- End function
.globl _Z25__device_stub__PDH_kernelP10hist_entryP8atomdescxd # -- Begin function _Z25__device_stub__PDH_kernelP10hist_entryP8atomdescxd
.p2align 4, 0x90
.type _Z25__device_stub__PDH_kernelP10hist_entryP8atomdescxd,@function
_Z25__device_stub__PDH_kernelP10hist_entryP8atomdescxd: # @_Z25__device_stub__PDH_kernelP10hist_entryP8atomdescxd
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movsd %xmm0, 48(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rax
movq %rax, 104(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10PDH_kernelP10hist_entryP8atomdescxd, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size _Z25__device_stub__PDH_kernelP10hist_entryP8atomdescxd, .Lfunc_end2-_Z25__device_stub__PDH_kernelP10hist_entryP8atomdescxd
.cfi_endproc
# -- End function
.globl _Z26__device_stub__PDH_kernel2P10hist_entryP8atomdescxd # -- Begin function _Z26__device_stub__PDH_kernel2P10hist_entryP8atomdescxd
.p2align 4, 0x90
.type _Z26__device_stub__PDH_kernel2P10hist_entryP8atomdescxd,@function
_Z26__device_stub__PDH_kernel2P10hist_entryP8atomdescxd: # @_Z26__device_stub__PDH_kernel2P10hist_entryP8atomdescxd
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movsd %xmm0, 48(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rax
movq %rax, 104(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z11PDH_kernel2P10hist_entryP8atomdescxd, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end3:
.size _Z26__device_stub__PDH_kernel2P10hist_entryP8atomdescxd, .Lfunc_end3-_Z26__device_stub__PDH_kernel2P10hist_entryP8atomdescxd
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z19report_running_timev
.LCPI4_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl _Z19report_running_timev
.p2align 4, 0x90
.type _Z19report_running_timev,@function
_Z19report_running_timev: # @_Z19report_running_timev
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
pushq %rax
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movl $endTime, %edi
movl $Idunno, %esi
callq gettimeofday
movq endTime(%rip), %rax
subq startTime(%rip), %rax
movq endTime+8(%rip), %rcx
subq startTime+8(%rip), %rcx
leaq 1000000(%rcx), %rbx
movq %rcx, %r14
sarq $63, %r14
addq %rax, %r14
testq %rcx, %rcx
cmovnsq %rcx, %rbx
movl $.L.str, %edi
movq %r14, %rsi
movq %rbx, %rdx
xorl %eax, %eax
callq printf
cvtsi2sd %r14, %xmm1
cvtsi2sd %rbx, %xmm0
divsd .LCPI4_0(%rip), %xmm0
addsd %xmm1, %xmm0
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size _Z19report_running_timev, .Lfunc_end4-_Z19report_running_timev
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z23report_running_time_GPUv
.LCPI5_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl _Z23report_running_time_GPUv
.p2align 4, 0x90
.type _Z23report_running_time_GPUv,@function
_Z23report_running_time_GPUv: # @_Z23report_running_time_GPUv
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
pushq %rax
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movl $endTime, %edi
movl $Idunno, %esi
callq gettimeofday
movq endTime(%rip), %rax
subq startTime(%rip), %rax
movq endTime+8(%rip), %rcx
subq startTime+8(%rip), %rcx
leaq 1000000(%rcx), %rbx
movq %rcx, %r14
sarq $63, %r14
addq %rax, %r14
testq %rcx, %rcx
cmovnsq %rcx, %rbx
movl $.L.str.1, %edi
movq %r14, %rsi
movq %rbx, %rdx
xorl %eax, %eax
callq printf
cvtsi2sd %r14, %xmm1
cvtsi2sd %rbx, %xmm0
divsd .LCPI5_0(%rip), %xmm0
addsd %xmm1, %xmm0
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size _Z23report_running_time_GPUv, .Lfunc_end5-_Z23report_running_time_GPUv
.cfi_endproc
# -- End function
.globl _Z16output_histogramP10hist_entry # -- Begin function _Z16output_histogramP10hist_entry
.p2align 4, 0x90
.type _Z16output_histogramP10hist_entry,@function
_Z16output_histogramP10hist_entry: # @_Z16output_histogramP10hist_entry
.cfi_startproc
# %bb.0:
cmpl $0, num_buckets(%rip)
jle .LBB6_9
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl $3435973837, %r12d # imm = 0xCCCCCCCD
xorl %r14d, %r14d
xorl %r15d, %r15d
jmp .LBB6_2
.p2align 4, 0x90
.LBB6_5: # in Loop: Header=BB6_2 Depth=1
movl $.L.str.4, %edi
movq %r15, %rsi
xorl %eax, %eax
callq printf
.LBB6_7: # in Loop: Header=BB6_2 Depth=1
incq %r14
movslq num_buckets(%rip), %rax
cmpq %rax, %r14
jge .LBB6_8
.LBB6_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl %r14d, %eax
imulq %r12, %rax
shrq $34, %rax
leal (%rax,%rax,4), %eax
cmpl %eax, %r14d
jne .LBB6_4
# %bb.3: # in Loop: Header=BB6_2 Depth=1
movl $.L.str.2, %edi
movl %r14d, %esi
xorl %eax, %eax
callq printf
.LBB6_4: # in Loop: Header=BB6_2 Depth=1
movq (%rbx,%r14,8), %rsi
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
addq (%rbx,%r14,8), %r15
movl num_buckets(%rip), %eax
decl %eax
cmpq %rax, %r14
je .LBB6_5
# %bb.6: # in Loop: Header=BB6_2 Depth=1
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
jmp .LBB6_7
.LBB6_8:
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r12
.cfi_restore %r14
.cfi_restore %r15
.LBB6_9: # %._crit_edge
retq
.Lfunc_end6:
.size _Z16output_histogramP10hist_entry, .Lfunc_end6-_Z16output_histogramP10hist_entry
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI7_0:
.quad 0x40e3738000000000 # double 39836
.LCPI7_1:
.quad 0x41dfffffffc00000 # double 2147483647
.LCPI7_2:
.quad 0x40d6760000000000 # double 23000
.LCPI7_3:
.quad 0x3f70000000000000 # double 0.00390625
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $120, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
movq 8(%rsi), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
cltq
movq %rax, PDH_acnt(%rip)
movq 16(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, PDH_res(%rip)
movsd .LCPI7_0(%rip), %xmm1 # xmm1 = mem[0],zero
divsd %xmm0, %xmm1
cvttsd2si %xmm1, %eax
leal 1(%rax), %ecx
movl %ecx, num_buckets(%rip)
cltq
leaq 8(,%rax,8), %rdi
callq malloc
movq %rax, histogram(%rip)
movq PDH_acnt(%rip), %rax
shlq $3, %rax
leaq (%rax,%rax,2), %rdi
callq malloc
movq %rax, atom_list(%rip)
movl $1, %edi
callq srand
cmpq $0, PDH_acnt(%rip)
jle .LBB7_3
# %bb.1: # %.lr.ph.preheader
xorl %ebx, %ebx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB7_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
movsd .LCPI7_1(%rip), %xmm1 # xmm1 = mem[0],zero
divsd %xmm1, %xmm0
movsd .LCPI7_2(%rip), %xmm1 # xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
movq atom_list(%rip), %rax
movsd %xmm0, (%rax,%rbx)
callq rand
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
divsd .LCPI7_1(%rip), %xmm0
mulsd .LCPI7_2(%rip), %xmm0
movq atom_list(%rip), %rax
movsd %xmm0, 8(%rax,%rbx)
callq rand
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
divsd .LCPI7_1(%rip), %xmm0
mulsd .LCPI7_2(%rip), %xmm0
movq atom_list(%rip), %rax
movsd %xmm0, 16(%rax,%rbx)
incq %r14
addq $24, %rbx
cmpq %r14, PDH_acnt(%rip)
jg .LBB7_2
.LBB7_3: # %._crit_edge
movl $.Lstr, %edi
callq puts@PLT
movl $startTime, %edi
movl $Idunno, %esi
callq gettimeofday
cmpq $0, PDH_acnt(%rip)
jle .LBB7_12
# %bb.4: # %.lr.ph11.i.preheader
movl $1, %ebx
movl $40, %r14d
xorl %r15d, %r15d
xorpd %xmm3, %xmm3
jmp .LBB7_6
.p2align 4, 0x90
.LBB7_5: # %.loopexit.i
# in Loop: Header=BB7_6 Depth=1
incq %rbx
addq $24, %r14
cmpq %r15, PDH_acnt(%rip)
jle .LBB7_12
.LBB7_6: # %.lr.ph11.i
# =>This Loop Header: Depth=1
# Child Loop BB7_8 Depth 2
movq %r15, %rax
incq %r15
cmpq %r15, PDH_acnt(%rip)
jle .LBB7_5
# %bb.7: # %.lr.ph.i.preheader
# in Loop: Header=BB7_6 Depth=1
leaq (%rax,%rax,2), %r12
movq %r14, %r13
movq %rbx, %rbp
jmp .LBB7_8
.p2align 4, 0x90
.LBB7_10: # %call.sqrt
# in Loop: Header=BB7_8 Depth=2
callq sqrt
xorpd %xmm3, %xmm3
.LBB7_11: # %.lr.ph.i.split
# in Loop: Header=BB7_8 Depth=2
divsd PDH_res(%rip), %xmm0
cvttsd2si %xmm0, %eax
movq histogram(%rip), %rcx
cltq
incq (%rcx,%rax,8)
incq %rbp
addq $24, %r13
cmpq %rbp, PDH_acnt(%rip)
jle .LBB7_5
.LBB7_8: # %.lr.ph.i
# Parent Loop BB7_6 Depth=1
# => This Inner Loop Header: Depth=2
movq atom_list(%rip), %rax
movsd (%rax,%r12,8), %xmm1 # xmm1 = mem[0],zero
movsd 8(%rax,%r12,8), %xmm2 # xmm2 = mem[0],zero
subsd -16(%rax,%r13), %xmm1
movsd 16(%rax,%r12,8), %xmm0 # xmm0 = mem[0],zero
mulsd %xmm1, %xmm1
subsd -8(%rax,%r13), %xmm2
mulsd %xmm2, %xmm2
addsd %xmm1, %xmm2
subsd (%rax,%r13), %xmm0
mulsd %xmm0, %xmm0
addsd %xmm2, %xmm0
ucomisd %xmm3, %xmm0
jb .LBB7_10
# %bb.9: # in Loop: Header=BB7_8 Depth=2
sqrtsd %xmm0, %xmm0
jmp .LBB7_11
.LBB7_12: # %_Z12PDH_baselinev.exit
movl $endTime, %edi
movl $Idunno, %esi
callq gettimeofday
movq endTime(%rip), %rax
subq startTime(%rip), %rax
movq endTime+8(%rip), %rcx
subq startTime+8(%rip), %rcx
leaq 1000000(%rcx), %rdx
movq %rcx, %rsi
sarq $63, %rsi
addq %rax, %rsi
testq %rcx, %rcx
cmovnsq %rcx, %rdx
movl $.L.str, %edi
xorl %eax, %eax
callq printf
cmpl $0, num_buckets(%rip)
jle .LBB7_20
# %bb.13: # %.lr.ph.i17.preheader
movq histogram(%rip), %r15
movl $3435973837, %r12d # imm = 0xCCCCCCCD
xorl %ebx, %ebx
xorl %r14d, %r14d
jmp .LBB7_14
.p2align 4, 0x90
.LBB7_18: # in Loop: Header=BB7_14 Depth=1
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
.LBB7_19: # in Loop: Header=BB7_14 Depth=1
incq %rbx
movslq num_buckets(%rip), %rax
cmpq %rax, %rbx
jge .LBB7_20
.LBB7_14: # %.lr.ph.i17
# =>This Inner Loop Header: Depth=1
movl %ebx, %eax
imulq %r12, %rax
shrq $34, %rax
leal (%rax,%rax,4), %eax
cmpl %eax, %ebx
jne .LBB7_16
# %bb.15: # in Loop: Header=BB7_14 Depth=1
movl $.L.str.2, %edi
movl %ebx, %esi
xorl %eax, %eax
callq printf
.LBB7_16: # in Loop: Header=BB7_14 Depth=1
movq (%r15,%rbx,8), %rsi
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
addq (%r15,%rbx,8), %r14
movl num_buckets(%rip), %eax
decl %eax
cmpq %rax, %rbx
jne .LBB7_18
# %bb.17: # in Loop: Header=BB7_14 Depth=1
movl $.L.str.4, %edi
movq %r14, %rsi
xorl %eax, %eax
callq printf
jmp .LBB7_19
.LBB7_20: # %_Z16output_histogramP10hist_entry.exit
movl $.Lstr.1, %edi
callq puts@PLT
movslq num_buckets(%rip), %rdi
shlq $3, %rdi
callq malloc
movq %rax, h_gpu_histogram(%rip)
movq PDH_acnt(%rip), %rax
shlq $3, %rax
leaq (%rax,%rax,2), %rsi
movl $d_atom_list, %edi
callq hipMalloc
movq d_atom_list(%rip), %rdi
movq atom_list(%rip), %rsi
movq PDH_acnt(%rip), %rax
shlq $3, %rax
leaq (%rax,%rax,2), %rdx
movl $1, %ecx
callq hipMemcpy
movslq num_buckets(%rip), %rsi
shlq $3, %rsi
movl $d_gpu_histogram, %edi
callq hipMalloc
movq d_gpu_histogram(%rip), %rdi
movq h_gpu_histogram(%rip), %rsi
movslq num_buckets(%rip), %rdx
shlq $3, %rdx
movl $1, %ecx
callq hipMemcpy
movl $startTime, %edi
movl $Idunno, %esi
callq gettimeofday
xorps %xmm0, %xmm0
cvtsi2sdq PDH_acnt(%rip), %xmm0
mulsd .LCPI7_3(%rip), %xmm0
callq ceil@PLT
cvttsd2si %xmm0, %rax
movl %eax, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $256, %rdx # imm = 0x100
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB7_22
# %bb.21:
movq d_gpu_histogram(%rip), %rax
movq d_atom_list(%rip), %rcx
movq PDH_acnt(%rip), %rdx
movsd PDH_res(%rip), %xmm0 # xmm0 = mem[0],zero
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
movq %rdx, 56(%rsp)
movsd %xmm0, 48(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rax
movq %rax, 104(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10PDH_kernelP10hist_entryP8atomdescxd, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB7_22:
movq h_gpu_histogram(%rip), %rdi
movq d_gpu_histogram(%rip), %rsi
movslq num_buckets(%rip), %rdx
shlq $3, %rdx
movl $2, %ecx
callq hipMemcpy
movl $endTime, %edi
movl $Idunno, %esi
callq gettimeofday
movq endTime(%rip), %rax
subq startTime(%rip), %rax
movq endTime+8(%rip), %rcx
subq startTime+8(%rip), %rcx
leaq 1000000(%rcx), %rdx
movq %rcx, %rsi
sarq $63, %rsi
addq %rax, %rsi
testq %rcx, %rcx
cmovnsq %rcx, %rdx
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
cmpl $0, num_buckets(%rip)
jle .LBB7_30
# %bb.23: # %.lr.ph.i23.preheader
movq h_gpu_histogram(%rip), %r15
movl $3435973837, %r12d # imm = 0xCCCCCCCD
xorl %ebx, %ebx
xorl %r14d, %r14d
jmp .LBB7_24
.p2align 4, 0x90
.LBB7_28: # in Loop: Header=BB7_24 Depth=1
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
.LBB7_29: # in Loop: Header=BB7_24 Depth=1
incq %rbx
movslq num_buckets(%rip), %rax
cmpq %rax, %rbx
jge .LBB7_30
.LBB7_24: # %.lr.ph.i23
# =>This Inner Loop Header: Depth=1
movl %ebx, %eax
imulq %r12, %rax
shrq $34, %rax
leal (%rax,%rax,4), %eax
cmpl %eax, %ebx
jne .LBB7_26
# %bb.25: # in Loop: Header=BB7_24 Depth=1
movl $.L.str.2, %edi
movl %ebx, %esi
xorl %eax, %eax
callq printf
.LBB7_26: # in Loop: Header=BB7_24 Depth=1
movq (%r15,%rbx,8), %rsi
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
addq (%r15,%rbx,8), %r14
movl num_buckets(%rip), %eax
decl %eax
cmpq %rax, %rbx
jne .LBB7_28
# %bb.27: # in Loop: Header=BB7_24 Depth=1
movl $.L.str.4, %edi
movq %r14, %rsi
xorl %eax, %eax
callq printf
jmp .LBB7_29
.LBB7_30: # %_Z16output_histogramP10hist_entry.exit27
movl $.Lstr.2, %edi
callq puts@PLT
movslq num_buckets(%rip), %r14
leaq (,%r14,8), %rdi
callq malloc
movq %rax, %rbx
movl %r14d, %eax
movq %rbx, diff_histogram(%rip)
testq %r14, %r14
jle .LBB7_33
# %bb.31: # %.lr.ph39
movq histogram(%rip), %rcx
movq h_gpu_histogram(%rip), %rdx
xorl %esi, %esi
.p2align 4, 0x90
.LBB7_32: # =>This Inner Loop Header: Depth=1
movq (%rcx,%rsi,8), %rdi
subq (%rdx,%rsi,8), %rdi
movq %rdi, (%rbx,%rsi,8)
incq %rsi
cmpq %rsi, %rax
jne .LBB7_32
.LBB7_33: # %._crit_edge40
testl %eax, %eax
jle .LBB7_41
# %bb.34: # %.lr.ph.i28.preheader
movl $3435973837, %r12d # imm = 0xCCCCCCCD
xorl %r14d, %r14d
xorl %r15d, %r15d
jmp .LBB7_35
.p2align 4, 0x90
.LBB7_39: # in Loop: Header=BB7_35 Depth=1
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
.LBB7_40: # in Loop: Header=BB7_35 Depth=1
incq %r14
movslq num_buckets(%rip), %rax
cmpq %rax, %r14
jge .LBB7_41
.LBB7_35: # %.lr.ph.i28
# =>This Inner Loop Header: Depth=1
movl %r14d, %eax
imulq %r12, %rax
shrq $34, %rax
leal (%rax,%rax,4), %eax
cmpl %eax, %r14d
jne .LBB7_37
# %bb.36: # in Loop: Header=BB7_35 Depth=1
movl $.L.str.2, %edi
movl %r14d, %esi
xorl %eax, %eax
callq printf
.LBB7_37: # in Loop: Header=BB7_35 Depth=1
movq (%rbx,%r14,8), %rsi
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
addq (%rbx,%r14,8), %r15
movl num_buckets(%rip), %eax
decl %eax
cmpq %rax, %r14
jne .LBB7_39
# %bb.38: # in Loop: Header=BB7_35 Depth=1
movl $.L.str.4, %edi
movq %r15, %rsi
xorl %eax, %eax
callq printf
jmp .LBB7_40
.LBB7_41: # %_Z16output_histogramP10hist_entry.exit32
movq d_gpu_histogram(%rip), %rdi
callq hipFree
movq d_atom_list(%rip), %rdi
callq hipFree
movq histogram(%rip), %rdi
callq free
movq atom_list(%rip), %rdi
callq free
movq h_gpu_histogram(%rip), %rdi
callq free
movq diff_histogram(%rip), %rdi
callq free
xorl %eax, %eax
addq $120, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end7:
.size main, .Lfunc_end7-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB8_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB8_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10PDH_kernelP10hist_entryP8atomdescxd, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11PDH_kernel2P10hist_entryP8atomdescxd, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end8:
.size __hip_module_ctor, .Lfunc_end8-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB9_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB9_2:
retq
.Lfunc_end9:
.size __hip_module_dtor, .Lfunc_end9-__hip_module_dtor
.cfi_endproc
# -- End function
.type histogram,@object # @histogram
.bss
.globl histogram
.p2align 3, 0x0
histogram:
.quad 0
.size histogram, 8
.type h_gpu_histogram,@object # @h_gpu_histogram
.globl h_gpu_histogram
.p2align 3, 0x0
h_gpu_histogram:
.quad 0
.size h_gpu_histogram, 8
.type d_gpu_histogram,@object # @d_gpu_histogram
.globl d_gpu_histogram
.p2align 3, 0x0
d_gpu_histogram:
.quad 0
.size d_gpu_histogram, 8
.type diff_histogram,@object # @diff_histogram
.globl diff_histogram
.p2align 3, 0x0
diff_histogram:
.quad 0
.size diff_histogram, 8
.type PDH_acnt,@object # @PDH_acnt
.globl PDH_acnt
.p2align 3, 0x0
PDH_acnt:
.quad 0 # 0x0
.size PDH_acnt, 8
.type num_buckets,@object # @num_buckets
.globl num_buckets
.p2align 2, 0x0
num_buckets:
.long 0 # 0x0
.size num_buckets, 4
.type PDH_res,@object # @PDH_res
.globl PDH_res
.p2align 3, 0x0
PDH_res:
.quad 0x0000000000000000 # double 0
.size PDH_res, 8
.type atom_list,@object # @atom_list
.globl atom_list
.p2align 3, 0x0
atom_list:
.quad 0
.size atom_list, 8
.type d_atom_list,@object # @d_atom_list
.globl d_atom_list
.p2align 3, 0x0
d_atom_list:
.quad 0
.size d_atom_list, 8
.type Idunno,@object # @Idunno
.globl Idunno
.p2align 2, 0x0
Idunno:
.zero 8
.size Idunno, 8
.type startTime,@object # @startTime
.globl startTime
.p2align 3, 0x0
startTime:
.zero 16
.size startTime, 16
.type endTime,@object # @endTime
.globl endTime
.p2align 3, 0x0
endTime:
.zero 16
.size endTime, 16
.type _Z10PDH_kernelP10hist_entryP8atomdescxd,@object # @_Z10PDH_kernelP10hist_entryP8atomdescxd
.section .rodata,"a",@progbits
.globl _Z10PDH_kernelP10hist_entryP8atomdescxd
.p2align 3, 0x0
_Z10PDH_kernelP10hist_entryP8atomdescxd:
.quad _Z25__device_stub__PDH_kernelP10hist_entryP8atomdescxd
.size _Z10PDH_kernelP10hist_entryP8atomdescxd, 8
.type _Z11PDH_kernel2P10hist_entryP8atomdescxd,@object # @_Z11PDH_kernel2P10hist_entryP8atomdescxd
.globl _Z11PDH_kernel2P10hist_entryP8atomdescxd
.p2align 3, 0x0
_Z11PDH_kernel2P10hist_entryP8atomdescxd:
.quad _Z26__device_stub__PDH_kernel2P10hist_entryP8atomdescxd
.size _Z11PDH_kernel2P10hist_entryP8atomdescxd, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Running time for CPU version: %ld.%06ld\n"
.size .L.str, 41
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Running time for GPU version: %ld.%06ld\n"
.size .L.str.1, 41
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "\n%02d: "
.size .L.str.2, 8
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "%15lld "
.size .L.str.3, 8
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "\n T:%lld \n"
.size .L.str.4, 11
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "| "
.size .L.str.5, 3
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z10PDH_kernelP10hist_entryP8atomdescxd"
.size .L__unnamed_1, 40
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z11PDH_kernel2P10hist_entryP8atomdescxd"
.size .L__unnamed_2, 41
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Starting CPU..."
.size .Lstr, 16
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Starting GPU..."
.size .Lstr.1, 16
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "Difference: "
.size .Lstr.2, 13
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__PDH_kernelP10hist_entryP8atomdescxd
.addrsig_sym _Z26__device_stub__PDH_kernel2P10hist_entryP8atomdescxd
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym d_gpu_histogram
.addrsig_sym d_atom_list
.addrsig_sym Idunno
.addrsig_sym startTime
.addrsig_sym endTime
.addrsig_sym _Z10PDH_kernelP10hist_entryP8atomdescxd
.addrsig_sym _Z11PDH_kernel2P10hist_entryP8atomdescxd
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z11PDH_kernel2P10hist_entryP8atomdescxd
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z10PDH_kernelP10hist_entryP8atomdescxd
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R8, SR_CTAID.X ; /* 0x0000000000087919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R8, R8, c[0x0][0x0], R3 ; /* 0x0000000008087a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.U32.AND P0, PT, R8, c[0x0][0x170], PT ; /* 0x00005c0008007a0c */
/* 0x000fe40003f06070 */
/*0050*/ SHF.R.S32.HI R2, RZ, 0x1f, R8 ; /* 0x0000001fff027819 */
/* 0x000fc80000011408 */
/*0060*/ ISETP.GE.AND.EX P0, PT, R2, c[0x0][0x174], PT, P0 ; /* 0x00005d0002007a0c */
/* 0x000fda0003f06300 */
/*0070*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0080*/ IADD3 R0, R8, 0x1, RZ ; /* 0x0000000108007810 */
/* 0x000fc80007ffe0ff */
/*0090*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fe40003f06070 */
/*00a0*/ SHF.R.S32.HI R3, RZ, 0x1f, R0 ; /* 0x0000001fff037819 */
/* 0x000fc80000011400 */
/*00b0*/ ISETP.GE.AND.EX P0, PT, R3, c[0x0][0x174], PT, P0 ; /* 0x00005d0003007a0c */
/* 0x000fda0003f06300 */
/*00c0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00d0*/ IMAD.MOV.U32 R9, RZ, RZ, 0x18 ; /* 0x00000018ff097424 */
/* 0x000fe200078e00ff */
/*00e0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00f0*/ IMAD R5, R2, 0x18, RZ ; /* 0x0000001802057824 */
/* 0x000fe400078e02ff */
/*0100*/ IMAD.WIDE.U32 R8, R8, R9, c[0x0][0x168] ; /* 0x00005a0008087625 */
/* 0x000fc800078e0009 */
/*0110*/ IMAD.MOV.U32 R11, RZ, RZ, R3 ; /* 0x000000ffff0b7224 */
/* 0x000fe400078e0003 */
/*0120*/ IMAD.MOV.U32 R10, RZ, RZ, R0 ; /* 0x000000ffff0a7224 */
/* 0x000fe400078e0000 */
/*0130*/ IMAD.IADD R9, R9, 0x1, R5 ; /* 0x0000000109097824 */
/* 0x000fc800078e0205 */
/*0140*/ IMAD.MOV.U32 R3, RZ, RZ, 0x18 ; /* 0x00000018ff037424 */
/* 0x001fe200078e00ff */
/*0150*/ LDG.E.64 R12, [R8.64+0x8] ; /* 0x00000804080c7981 */
/* 0x000ea2000c1e1b00 */
/*0160*/ IMAD R11, R11, 0x18, RZ ; /* 0x000000180b0b7824 */
/* 0x000fe400078e02ff */
/*0170*/ IMAD.WIDE.U32 R2, R10, R3, c[0x0][0x168] ; /* 0x00005a000a027625 */
/* 0x000fe200078e0003 */
/*0180*/ LDG.E.64 R6, [R8.64] ; /* 0x0000000408067981 */
/* 0x000ee6000c1e1b00 */
/*0190*/ IMAD.IADD R3, R3, 0x1, R11 ; /* 0x0000000103037824 */
/* 0x000fe200078e020b */
/*01a0*/ LDG.E.64 R16, [R8.64+0x10] ; /* 0x0000100408107981 */
/* 0x000f28000c1e1b00 */
/*01b0*/ LDG.E.64 R10, [R2.64+0x8] ; /* 0x00000804020a7981 */
/* 0x000ea8000c1e1b00 */
/*01c0*/ LDG.E.64 R4, [R2.64] ; /* 0x0000000402047981 */
/* 0x000ee8000c1e1b00 */
/*01d0*/ LDG.E.64 R14, [R2.64+0x10] ; /* 0x00001004020e7981 */
/* 0x000f22000c1e1b00 */
/*01e0*/ YIELD ; /* 0x0000000000007946 */
/* 0x000fe20003800000 */
/*01f0*/ BSSY B0, 0x3b0 ; /* 0x000001b000007945 */
/* 0x000fe20003800000 */
/*0200*/ DADD R10, -R10, R12 ; /* 0x000000000a0a7229 */
/* 0x004e08000000010c */
/*0210*/ DADD R4, -R4, R6 ; /* 0x0000000004047229 */
/* 0x008fc80000000106 */
/*0220*/ DMUL R10, R10, R10 ; /* 0x0000000a0a0a7228 */
/* 0x001e080000000000 */
/*0230*/ DADD R14, -R14, R16 ; /* 0x000000000e0e7229 */
/* 0x010fc80000000110 */
/*0240*/ DFMA R6, R4, R4, R10 ; /* 0x000000040406722b */
/* 0x001e0c000000000a */
/*0250*/ DFMA R6, R14, R14, R6 ; /* 0x0000000e0e06722b */
/* 0x001e0c0000000006 */
/*0260*/ MUFU.RSQ64H R5, R7 ; /* 0x0000000700057308 */
/* 0x001e280000001c00 */
/*0270*/ IADD3 R4, R7, -0x3500000, RZ ; /* 0xfcb0000007047810 */
/* 0x000fe20007ffe0ff */
/*0280*/ IMAD.MOV.U32 R12, RZ, RZ, 0x0 ; /* 0x00000000ff0c7424 */
/* 0x000fe400078e00ff */
/*0290*/ IMAD.MOV.U32 R13, RZ, RZ, 0x3fd80000 ; /* 0x3fd80000ff0d7424 */
/* 0x000fc600078e00ff */
/*02a0*/ DMUL R10, R4, R4 ; /* 0x00000004040a7228 */
/* 0x001e0c0000000000 */
/*02b0*/ DFMA R10, R6, -R10, 1 ; /* 0x3ff00000060a742b */
/* 0x001e0c000000080a */
/*02c0*/ DFMA R2, R10, R12, 0.5 ; /* 0x3fe000000a02742b */
/* 0x001fc8000000000c */
/*02d0*/ DMUL R10, R4, R10 ; /* 0x0000000a040a7228 */
/* 0x000e220000000000 */
/*02e0*/ ISETP.GE.U32.AND P0, PT, R4, 0x7ca00000, PT ; /* 0x7ca000000400780c */
/* 0x000fca0003f06070 */
/*02f0*/ DFMA R12, R2, R10, R4 ; /* 0x0000000a020c722b */
/* 0x001e0c0000000004 */
/*0300*/ DMUL R14, R6, R12 ; /* 0x0000000c060e7228 */
/* 0x001e080000000000 */
/*0310*/ IADD3 R11, R13, -0x100000, RZ ; /* 0xfff000000d0b7810 */
/* 0x000fe20007ffe0ff */
/*0320*/ IMAD.MOV.U32 R10, RZ, RZ, R12 ; /* 0x000000ffff0a7224 */
/* 0x000fe200078e000c */
/*0330*/ DFMA R16, R14, -R14, R6 ; /* 0x8000000e0e10722b */
/* 0x001e0c0000000006 */
/*0340*/ DFMA R2, R16, R10, R14 ; /* 0x0000000a1002722b */
/* 0x001062000000000e */
/*0350*/ @!P0 BRA 0x3a0 ; /* 0x0000004000008947 */
/* 0x000fea0003800000 */
/*0360*/ MOV R2, 0x380 ; /* 0x0000038000027802 */
/* 0x002fca0000000f00 */
/*0370*/ CALL.REL.NOINC 0xbd0 ; /* 0x0000085000007944 */
/* 0x001fea0003c00000 */
/*0380*/ IMAD.MOV.U32 R2, RZ, RZ, R4 ; /* 0x000000ffff027224 */
/* 0x002fe400078e0004 */
/*0390*/ IMAD.MOV.U32 R3, RZ, RZ, R5 ; /* 0x000000ffff037224 */
/* 0x000fe400078e0005 */
/*03a0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*03b0*/ MUFU.RCP64H R5, c[0x0][0x17c] ; /* 0x00005f0000057b08 */
/* 0x000ea20000001800 */
/*03c0*/ IMAD.MOV.U32 R10, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff0a7624 */
/* 0x001fe200078e00ff */
/*03d0*/ FSETP.GEU.AND P1, PT, |R3|, 6.5827683646048100446e-37, PT ; /* 0x036000000300780b */
/* 0x002fe20003f2e200 */
/*03e0*/ IMAD.MOV.U32 R11, RZ, RZ, c[0x0][0x17c] ; /* 0x00005f00ff0b7624 */
/* 0x000fe200078e00ff */
/*03f0*/ BSSY B0, 0x510 ; /* 0x0000011000007945 */
/* 0x000fe20003800000 */
/*0400*/ IMAD.MOV.U32 R4, RZ, RZ, 0x1 ; /* 0x00000001ff047424 */
/* 0x000fcc00078e00ff */
/*0410*/ DFMA R6, R4, -R10, 1 ; /* 0x3ff000000406742b */
/* 0x004e0c000000080a */
/*0420*/ DFMA R6, R6, R6, R6 ; /* 0x000000060606722b */
/* 0x001e0c0000000006 */
/*0430*/ DFMA R6, R4, R6, R4 ; /* 0x000000060406722b */
/* 0x001e0c0000000004 */
/*0440*/ DFMA R4, R6, -R10, 1 ; /* 0x3ff000000604742b */
/* 0x001e0c000000080a */
/*0450*/ DFMA R4, R6, R4, R6 ; /* 0x000000040604722b */
/* 0x001e0c0000000006 */
/*0460*/ DMUL R6, R4, R2 ; /* 0x0000000204067228 */
/* 0x001e0c0000000000 */
/*0470*/ DFMA R10, R6, -c[0x0][0x178], R2 ; /* 0x80005e00060a7a2b */
/* 0x001e0c0000000002 */
/*0480*/ DFMA R4, R4, R10, R6 ; /* 0x0000000a0404722b */
/* 0x001e140000000006 */
/*0490*/ FFMA R6, RZ, c[0x0][0x17c], R5 ; /* 0x00005f00ff067a23 */
/* 0x001fca0000000005 */
/*04a0*/ FSETP.GT.AND P0, PT, |R6|, 1.469367938527859385e-39, PT ; /* 0x001000000600780b */
/* 0x000fda0003f04200 */
/*04b0*/ @P0 BRA P1, 0x500 ; /* 0x0000004000000947 */
/* 0x000fea0000800000 */
/*04c0*/ IMAD.MOV.U32 R4, RZ, RZ, R2 ; /* 0x000000ffff047224 */
/* 0x000fe200078e0002 */
/*04d0*/ MOV R16, 0x500 ; /* 0x0000050000107802 */
/* 0x000fe20000000f00 */
/*04e0*/ IMAD.MOV.U32 R5, RZ, RZ, R3 ; /* 0x000000ffff057224 */
/* 0x000fe400078e0003 */
/*04f0*/ CALL.REL.NOINC 0x5e0 ; /* 0x000000e000007944 */
/* 0x000fea0003c00000 */
/*0500*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0510*/ F2I.F64.TRUNC R2, R4 ; /* 0x0000000400027311 */
/* 0x000e22000030d100 */
/*0520*/ IADD3 R10, R0, 0x1, RZ ; /* 0x00000001000a7810 */
/* 0x000fe20007ffe0ff */
/*0530*/ IMAD.MOV.U32 R3, RZ, RZ, 0x8 ; /* 0x00000008ff037424 */
/* 0x000fe400078e00ff */
/*0540*/ IMAD.MOV.U32 R6, RZ, RZ, 0x1 ; /* 0x00000001ff067424 */
/* 0x000fe200078e00ff */
/*0550*/ ISETP.GE.U32.AND P0, PT, R10, c[0x0][0x170], PT ; /* 0x00005c000a007a0c */
/* 0x000fe20003f06070 */
/*0560*/ IMAD.MOV.U32 R7, RZ, RZ, 0x0 ; /* 0x00000000ff077424 */
/* 0x000fe200078e00ff */
/*0570*/ SHF.R.S32.HI R11, RZ, 0x1f, R10 ; /* 0x0000001fff0b7819 */
/* 0x000fc8000001140a */
/*0580*/ ISETP.GE.AND.EX P0, PT, R11, c[0x0][0x174], PT, P0 ; /* 0x00005d000b007a0c */
/* 0x000fe20003f06300 */
/*0590*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x001fca00078e0203 */
/*05a0*/ RED.E.ADD.64.STRONG.GPU [R2.64], R6 ; /* 0x000000060200798e */
/* 0x0001e2000c10e584 */
/*05b0*/ IMAD.MOV.U32 R0, RZ, RZ, R10 ; /* 0x000000ffff007224 */
/* 0x000fcc00078e000a */
/*05c0*/ @!P0 BRA 0x140 ; /* 0xfffffb7000008947 */
/* 0x000fea000383ffff */
/*05d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*05e0*/ IMAD.MOV.U32 R18, RZ, RZ, c[0x0][0x17c] ; /* 0x00005f00ff127624 */
/* 0x000fe200078e00ff */
/*05f0*/ FSETP.GEU.AND P2, PT, |R5|.reuse, 1.469367938527859385e-39, PT ; /* 0x001000000500780b */
/* 0x040fe20003f4e200 */
/*0600*/ IMAD.MOV.U32 R17, RZ, RZ, 0x1ca00000 ; /* 0x1ca00000ff117424 */
/* 0x000fe200078e00ff */
/*0610*/ LOP3.LUT R20, R5, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff0000005147812 */
/* 0x000fe200078ec0ff */
/*0620*/ IMAD.MOV.U32 R10, RZ, RZ, 0x1 ; /* 0x00000001ff0a7424 */
/* 0x000fe200078e00ff */
/*0630*/ FSETP.GEU.AND P0, PT, |R18|.reuse, 1.469367938527859385e-39, PT ; /* 0x001000001200780b */
/* 0x040fe20003f0e200 */
/*0640*/ BSSY B1, 0xb90 ; /* 0x0000054000017945 */
/* 0x000fe20003800000 */
/*0650*/ LOP3.LUT R6, R18.reuse, 0x800fffff, RZ, 0xc0, !PT ; /* 0x800fffff12067812 */
/* 0x040fe400078ec0ff */
/*0660*/ LOP3.LUT R19, R18, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff0000012137812 */
/* 0x000fe400078ec0ff */
/*0670*/ LOP3.LUT R7, R6, 0x3ff00000, RZ, 0xfc, !PT ; /* 0x3ff0000006077812 */
/* 0x000fe200078efcff */
/*0680*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff067624 */
/* 0x000fe200078e00ff */
/*0690*/ ISETP.GE.U32.AND P1, PT, R20, R19, PT ; /* 0x000000131400720c */
/* 0x000fe20003f26070 */
/*06a0*/ IMAD.MOV.U32 R22, RZ, RZ, R19 ; /* 0x000000ffff167224 */
/* 0x000fe200078e0013 */
/*06b0*/ MOV R21, R20 ; /* 0x0000001400157202 */
/* 0x000fc40000000f00 */
/*06c0*/ SEL R17, R17, 0x63400000, !P1 ; /* 0x6340000011117807 */
/* 0x000fe20004800000 */
/*06d0*/ @!P0 IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff028624 */
/* 0x000fe400078e00ff */
/*06e0*/ @!P0 IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x17c] ; /* 0x00005f00ff038624 */
/* 0x000fe200078e00ff */
/*06f0*/ @!P2 LOP3.LUT R12, R17, 0x80000000, R5, 0xf8, !PT ; /* 0x80000000110ca812 */
/* 0x000fc800078ef805 */
/*0700*/ @!P2 LOP3.LUT R13, R12, 0x100000, RZ, 0xfc, !PT ; /* 0x001000000c0da812 */
/* 0x000fe200078efcff */
/*0710*/ @!P0 DMUL R6, R2, 8.98846567431157953865e+307 ; /* 0x7fe0000002068828 */
/* 0x000e220000000000 */
/*0720*/ @!P2 IMAD.MOV.U32 R12, RZ, RZ, RZ ; /* 0x000000ffff0ca224 */
/* 0x000fca00078e00ff */
/*0730*/ MUFU.RCP64H R11, R7 ; /* 0x00000007000b7308 */
/* 0x001e280000001800 */
/*0740*/ @!P0 LOP3.LUT R22, R7, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff0000007168812 */
/* 0x000fc800078ec0ff */
/*0750*/ IADD3 R23, R22, -0x1, RZ ; /* 0xffffffff16177810 */
/* 0x000fe20007ffe0ff */
/*0760*/ DFMA R2, R10, -R6, 1 ; /* 0x3ff000000a02742b */
/* 0x001e0c0000000806 */
/*0770*/ DFMA R14, R2, R2, R2 ; /* 0x00000002020e722b */
/* 0x0010640000000002 */
/*0780*/ LOP3.LUT R3, R17, 0x800fffff, R5, 0xf8, !PT ; /* 0x800fffff11037812 */
/* 0x001fe200078ef805 */
/*0790*/ IMAD.MOV.U32 R2, RZ, RZ, R4 ; /* 0x000000ffff027224 */
/* 0x000fc600078e0004 */
/*07a0*/ DFMA R10, R10, R14, R10 ; /* 0x0000000e0a0a722b */
/* 0x002e08000000000a */
/*07b0*/ @!P2 DFMA R2, R2, 2, -R12 ; /* 0x400000000202a82b */
/* 0x000e48000000080c */
/*07c0*/ DFMA R12, R10, -R6, 1 ; /* 0x3ff000000a0c742b */
/* 0x001e0c0000000806 */
/*07d0*/ @!P2 LOP3.LUT R21, R3, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff000000315a812 */
/* 0x002fe200078ec0ff */
/*07e0*/ DFMA R10, R10, R12, R10 ; /* 0x0000000c0a0a722b */
/* 0x001e06000000000a */
/*07f0*/ IADD3 R14, R21, -0x1, RZ ; /* 0xffffffff150e7810 */
/* 0x000fc60007ffe0ff */
/*0800*/ DMUL R12, R10, R2 ; /* 0x000000020a0c7228 */
/* 0x001e220000000000 */
/*0810*/ ISETP.GT.U32.AND P0, PT, R14, 0x7feffffe, PT ; /* 0x7feffffe0e00780c */
/* 0x000fc80003f04070 */
/*0820*/ ISETP.GT.U32.OR P0, PT, R23, 0x7feffffe, P0 ; /* 0x7feffffe1700780c */
/* 0x000fe20000704470 */
/*0830*/ DFMA R14, R12, -R6, R2 ; /* 0x800000060c0e722b */
/* 0x001e0c0000000002 */
/*0840*/ DFMA R14, R10, R14, R12 ; /* 0x0000000e0a0e722b */
/* 0x00104c000000000c */
/*0850*/ @P0 BRA 0xa00 ; /* 0x000001a000000947 */
/* 0x000fea0003800000 */
/*0860*/ IMAD.IADD R19, R20, 0x1, -R19 ; /* 0x0000000114137824 */
/* 0x003fca00078e0a13 */
/*0870*/ IMNMX R19, R19, -0x46a00000, !PT ; /* 0xb960000013137817 */
/* 0x000fc80007800200 */
/*0880*/ IMNMX R4, R19, 0x46a00000, PT ; /* 0x46a0000013047817 */
/* 0x000fca0003800200 */
/*0890*/ IMAD.IADD R17, R4, 0x1, -R17 ; /* 0x0000000104117824 */
/* 0x000fe400078e0a11 */
/*08a0*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */
/* 0x000fc600078e00ff */
/*08b0*/ IADD3 R5, R17, 0x7fe00000, RZ ; /* 0x7fe0000011057810 */
/* 0x000fcc0007ffe0ff */
/*08c0*/ DMUL R10, R14, R4 ; /* 0x000000040e0a7228 */
/* 0x000e140000000000 */
/*08d0*/ FSETP.GTU.AND P0, PT, |R11|, 1.469367938527859385e-39, PT ; /* 0x001000000b00780b */
/* 0x001fda0003f0c200 */
/*08e0*/ @P0 BRA 0xb80 ; /* 0x0000029000000947 */
/* 0x000fea0003800000 */
/*08f0*/ DFMA R2, R14, -R6, R2 ; /* 0x800000060e02722b */
/* 0x000e220000000002 */
/*0900*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */
/* 0x000fd200078e00ff */
/*0910*/ FSETP.NEU.AND P0, PT, R3.reuse, RZ, PT ; /* 0x000000ff0300720b */
/* 0x041fe40003f0d000 */
/*0920*/ LOP3.LUT R2, R3, c[0x0][0x17c], RZ, 0x3c, !PT ; /* 0x00005f0003027a12 */
/* 0x000fc800078e3cff */
/*0930*/ LOP3.LUT R7, R2, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000002077812 */
/* 0x000fc800078ec0ff */
/*0940*/ LOP3.LUT R5, R7, R5, RZ, 0xfc, !PT ; /* 0x0000000507057212 */
/* 0x000fc600078efcff */
/*0950*/ @!P0 BRA 0xb80 ; /* 0x0000022000008947 */
/* 0x000fea0003800000 */
/*0960*/ IMAD.MOV R3, RZ, RZ, -R17 ; /* 0x000000ffff037224 */
/* 0x000fe200078e0a11 */
/*0970*/ DMUL.RP R4, R14, R4 ; /* 0x000000040e047228 */
/* 0x000e220000008000 */
/*0980*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */
/* 0x000fcc00078e00ff */
/*0990*/ DFMA R2, R10, -R2, R14 ; /* 0x800000020a02722b */
/* 0x000e46000000000e */
/*09a0*/ LOP3.LUT R7, R5, R7, RZ, 0x3c, !PT ; /* 0x0000000705077212 */
/* 0x001fc600078e3cff */
/*09b0*/ IADD3 R2, -R17, -0x43300000, RZ ; /* 0xbcd0000011027810 */
/* 0x002fc80007ffe1ff */
/*09c0*/ FSETP.NEU.AND P0, PT, |R3|, R2, PT ; /* 0x000000020300720b */
/* 0x000fc80003f0d200 */
/*09d0*/ FSEL R10, R4, R10, !P0 ; /* 0x0000000a040a7208 */
/* 0x000fe40004000000 */
/*09e0*/ FSEL R11, R7, R11, !P0 ; /* 0x0000000b070b7208 */
/* 0x000fe20004000000 */
/*09f0*/ BRA 0xb80 ; /* 0x0000018000007947 */
/* 0x000fea0003800000 */
/*0a00*/ DSETP.NAN.AND P0, PT, R4, R4, PT ; /* 0x000000040400722a */
/* 0x003e1c0003f08000 */
/*0a10*/ @P0 BRA 0xb60 ; /* 0x0000014000000947 */
/* 0x001fea0003800000 */
/*0a20*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff027624 */
/* 0x000fe400078e00ff */
/*0a30*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x17c] ; /* 0x00005f00ff037624 */
/* 0x000fcc00078e00ff */
/*0a40*/ DSETP.NAN.AND P0, PT, R2, c[0x0][0x178], PT ; /* 0x00005e000200762a */
/* 0x000e1c0003f08000 */
/*0a50*/ @P0 BRA 0xb30 ; /* 0x000000d000000947 */
/* 0x001fea0003800000 */
/*0a60*/ ISETP.NE.AND P0, PT, R21, R22, PT ; /* 0x000000161500720c */
/* 0x000fe20003f05270 */
/*0a70*/ IMAD.MOV.U32 R10, RZ, RZ, 0x0 ; /* 0x00000000ff0a7424 */
/* 0x000fe400078e00ff */
/*0a80*/ IMAD.MOV.U32 R11, RZ, RZ, -0x80000 ; /* 0xfff80000ff0b7424 */
/* 0x000fd400078e00ff */
/*0a90*/ @!P0 BRA 0xb80 ; /* 0x000000e000008947 */
/* 0x000fea0003800000 */
/*0aa0*/ ISETP.NE.AND P0, PT, R21, 0x7ff00000, PT ; /* 0x7ff000001500780c */
/* 0x000fe40003f05270 */
/*0ab0*/ LOP3.LUT R4, R5, c[0x0][0x17c], RZ, 0x3c, !PT ; /* 0x00005f0005047a12 */
/* 0x000fe400078e3cff */
/*0ac0*/ ISETP.EQ.OR P0, PT, R22, RZ, !P0 ; /* 0x000000ff1600720c */
/* 0x000fe40004702670 */
/*0ad0*/ LOP3.LUT R11, R4, 0x80000000, RZ, 0xc0, !PT ; /* 0x80000000040b7812 */
/* 0x000fd600078ec0ff */
/*0ae0*/ @P0 LOP3.LUT R2, R11, 0x7ff00000, RZ, 0xfc, !PT ; /* 0x7ff000000b020812 */
/* 0x000fe200078efcff */
/*0af0*/ @!P0 IMAD.MOV.U32 R10, RZ, RZ, RZ ; /* 0x000000ffff0a8224 */
/* 0x000fe400078e00ff */
/*0b00*/ @P0 IMAD.MOV.U32 R10, RZ, RZ, RZ ; /* 0x000000ffff0a0224 */
/* 0x000fe400078e00ff */
/*0b10*/ @P0 IMAD.MOV.U32 R11, RZ, RZ, R2 ; /* 0x000000ffff0b0224 */
/* 0x000fe200078e0002 */
/*0b20*/ BRA 0xb80 ; /* 0x0000005000007947 */
/* 0x000fea0003800000 */
/*0b30*/ LOP3.LUT R11, R18, 0x80000, RZ, 0xfc, !PT ; /* 0x00080000120b7812 */
/* 0x000fe200078efcff */
/*0b40*/ IMAD.MOV.U32 R10, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff0a7624 */
/* 0x000fe200078e00ff */
/*0b50*/ BRA 0xb80 ; /* 0x0000002000007947 */
/* 0x000fea0003800000 */
/*0b60*/ LOP3.LUT R11, R5, 0x80000, RZ, 0xfc, !PT ; /* 0x00080000050b7812 */
/* 0x000fe400078efcff */
/*0b70*/ MOV R10, R4 ; /* 0x00000004000a7202 */
/* 0x000fc40000000f00 */
/*0b80*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0b90*/ IMAD.MOV.U32 R17, RZ, RZ, 0x0 ; /* 0x00000000ff117424 */
/* 0x000fe400078e00ff */
/*0ba0*/ IMAD.MOV.U32 R4, RZ, RZ, R10 ; /* 0x000000ffff047224 */
/* 0x000fc400078e000a */
/*0bb0*/ IMAD.MOV.U32 R5, RZ, RZ, R11 ; /* 0x000000ffff057224 */
/* 0x000fe200078e000b */
/*0bc0*/ RET.REL.NODEC R16 0x0 ; /* 0xfffff43010007950 */
/* 0x000fec0003c3ffff */
/*0bd0*/ ISETP.GE.U32.AND P0, PT, R4, -0x3400000, PT ; /* 0xfcc000000400780c */
/* 0x000fe20003f06070 */
/*0be0*/ BSSY B1, 0xe50 ; /* 0x0000026000017945 */
/* 0x000fe20003800000 */
/*0bf0*/ IMAD.MOV.U32 R10, RZ, RZ, R12 ; /* 0x000000ffff0a7224 */
/* 0x000fe400078e000c */
/*0c00*/ IMAD.MOV.U32 R4, RZ, RZ, R16 ; /* 0x000000ffff047224 */
/* 0x000fe400078e0010 */
/*0c10*/ IMAD.MOV.U32 R5, RZ, RZ, R17 ; /* 0x000000ffff057224 */
/* 0x000fce00078e0011 */
/*0c20*/ @!P0 BRA 0xcb0 ; /* 0x0000008000008947 */
/* 0x000fea0003800000 */
/*0c30*/ DFMA.RM R4, R4, R10, R14 ; /* 0x0000000a0404722b */
/* 0x000e14000000400e */
/*0c40*/ IADD3 R10, P0, R4, 0x1, RZ ; /* 0x00000001040a7810 */
/* 0x001fca0007f1e0ff */
/*0c50*/ IMAD.X R11, RZ, RZ, R5, P0 ; /* 0x000000ffff0b7224 */
/* 0x000fcc00000e0605 */
/*0c60*/ DFMA.RP R6, -R4, R10, R6 ; /* 0x0000000a0406722b */
/* 0x000e0c0000008106 */
/*0c70*/ DSETP.GT.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600722a */
/* 0x001e0c0003f04000 */
/*0c80*/ FSEL R4, R10, R4, P0 ; /* 0x000000040a047208 */
/* 0x001fe40000000000 */
/*0c90*/ FSEL R5, R11, R5, P0 ; /* 0x000000050b057208 */
/* 0x000fe20000000000 */
/*0ca0*/ BRA 0xe40 ; /* 0x0000019000007947 */
/* 0x000fea0003800000 */
/*0cb0*/ DSETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600722a */
/* 0x000e1c0003f05000 */
/*0cc0*/ @!P0 BRA 0xe30 ; /* 0x0000016000008947 */
/* 0x001fea0003800000 */
/*0cd0*/ ISETP.GE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fda0003f06270 */
/*0ce0*/ @!P0 IMAD.MOV.U32 R4, RZ, RZ, 0x0 ; /* 0x00000000ff048424 */
/* 0x000fe400078e00ff */
/*0cf0*/ @!P0 IMAD.MOV.U32 R5, RZ, RZ, -0x80000 ; /* 0xfff80000ff058424 */
/* 0x000fe200078e00ff */
/*0d00*/ @!P0 BRA 0xe40 ; /* 0x0000013000008947 */
/* 0x000fea0003800000 */
/*0d10*/ ISETP.GT.AND P0, PT, R7, 0x7fefffff, PT ; /* 0x7fefffff0700780c */
/* 0x000fda0003f04270 */
/*0d20*/ @P0 BRA 0xe30 ; /* 0x0000010000000947 */
/* 0x000fea0003800000 */
/*0d30*/ DMUL R4, R6, 8.11296384146066816958e+31 ; /* 0x4690000006047828 */
/* 0x0000620000000000 */
/*0d40*/ IMAD.MOV.U32 R12, RZ, RZ, 0x0 ; /* 0x00000000ff0c7424 */
/* 0x000fe400078e00ff */
/*0d50*/ IMAD.MOV.U32 R6, RZ, RZ, RZ ; /* 0x000000ffff067224 */
/* 0x001fe400078e00ff */
/*0d60*/ IMAD.MOV.U32 R13, RZ, RZ, 0x3fd80000 ; /* 0x3fd80000ff0d7424 */
/* 0x000fe200078e00ff */
/*0d70*/ MUFU.RSQ64H R7, R5 ; /* 0x0000000500077308 */
/* 0x002e260000001c00 */
/*0d80*/ DMUL R10, R6, R6 ; /* 0x00000006060a7228 */
/* 0x001e0c0000000000 */
/*0d90*/ DFMA R10, R4, -R10, 1 ; /* 0x3ff00000040a742b */
/* 0x001e0c000000080a */
/*0da0*/ DFMA R12, R10, R12, 0.5 ; /* 0x3fe000000a0c742b */
/* 0x001fc8000000000c */
/*0db0*/ DMUL R10, R6, R10 ; /* 0x0000000a060a7228 */
/* 0x000e0c0000000000 */
/*0dc0*/ DFMA R10, R12, R10, R6 ; /* 0x0000000a0c0a722b */
/* 0x001e0c0000000006 */
/*0dd0*/ DMUL R6, R4, R10 ; /* 0x0000000a04067228 */
/* 0x0010480000000000 */
/*0de0*/ IADD3 R11, R11, -0x100000, RZ ; /* 0xfff000000b0b7810 */
/* 0x001fe40007ffe0ff */
/*0df0*/ DFMA R12, R6, -R6, R4 ; /* 0x80000006060c722b */
/* 0x002e0c0000000004 */
/*0e00*/ DFMA R4, R10, R12, R6 ; /* 0x0000000c0a04722b */
/* 0x001e140000000006 */
/*0e10*/ IADD3 R5, R5, -0x3500000, RZ ; /* 0xfcb0000005057810 */
/* 0x001fe20007ffe0ff */
/*0e20*/ BRA 0xe40 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*0e30*/ DADD R4, R6, R6 ; /* 0x0000000006047229 */
/* 0x00004c0000000006 */
/*0e40*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0e50*/ IMAD.MOV.U32 R3, RZ, RZ, 0x0 ; /* 0x00000000ff037424 */
/* 0x000fc800078e00ff */
/*0e60*/ RET.REL.NODEC R2 0x0 ; /* 0xfffff19002007950 */
/* 0x000fea0003c3ffff */
/*0e70*/ BRA 0xe70; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0e80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ea0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0eb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ec0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ed0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ee0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ef0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10PDH_kernelP10hist_entryP8atomdescxd
.globl _Z10PDH_kernelP10hist_entryP8atomdescxd
.p2align 8
.type _Z10PDH_kernelP10hist_entryP8atomdescxd,@function
_Z10PDH_kernelP10hist_entryP8atomdescxd:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x2c
s_load_b64 s[2:3], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
s_mul_i32 s15, s15, s4
s_mov_b32 s4, exec_lo
v_add_nc_u32_e32 v2, s15, v0
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i64_e64 s[2:3], v[2:3]
s_cbranch_execz .LBB0_4
v_add_nc_u32_e32 v4, 1, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v5, 31, v4
v_cmp_gt_i64_e32 vcc_lo, s[2:3], v[4:5]
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_4
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x18
v_add3_u32 v3, v0, s15, 2
s_mov_b32 s8, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
v_ashrrev_i32_e32 v6, 31, v3
s_waitcnt lgkmcnt(0)
v_mad_i64_i32 v[0:1], null, v2, 24, s[6:7]
v_add_co_u32 v2, vcc_lo, v3, -1
v_add_co_ci_u32_e32 v3, vcc_lo, -1, v6, vcc_lo
.LBB0_3:
v_mad_u64_u32 v[8:9], null, v4, 24, s[6:7]
s_delay_alu instid0(VALU_DEP_4)
v_add_co_u32 v10, vcc_lo, v0, 16
v_add_co_ci_u32_e32 v11, vcc_lo, 0, v1, vcc_lo
s_clause 0x1
global_load_b64 v[12:13], v[0:1], off
global_load_b64 v[14:15], v[10:11], off offset:-8
v_mov_b32_e32 v4, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[6:7], null, v5, 24, v[4:5]
v_mov_b32_e32 v9, v6
s_clause 0x2
global_load_b128 v[4:7], v[8:9], off
global_load_b64 v[10:11], v[10:11], off
global_load_b64 v[8:9], v[8:9], off offset:16
s_waitcnt vmcnt(2)
v_add_f64 v[6:7], v[14:15], -v[6:7]
v_add_f64 v[4:5], v[12:13], -v[4:5]
s_waitcnt vmcnt(0)
v_add_f64 v[8:9], v[10:11], -v[8:9]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[6:7], v[6:7], v[6:7]
v_fma_f64 v[4:5], v[4:5], v[4:5], v[6:7]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[4:5], v[8:9], v[8:9], v[4:5]
v_cmp_gt_f64_e32 vcc_lo, 0x10000000, v[4:5]
v_cndmask_b32_e64 v6, 0, 1, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b32_e32 v6, 8, v6
v_ldexp_f64 v[4:5], v[4:5], v6
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_rsq_f64_e32 v[6:7], v[4:5]
s_waitcnt_depctr 0xfff
v_mul_f64 v[8:9], v[4:5], v[6:7]
v_mul_f64 v[6:7], v[6:7], 0.5
v_fma_f64 v[10:11], -v[6:7], v[8:9], 0.5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_fma_f64 v[8:9], v[8:9], v[10:11], v[8:9]
v_fma_f64 v[6:7], v[6:7], v[10:11], v[6:7]
v_fma_f64 v[10:11], -v[8:9], v[8:9], v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[8:9], v[10:11], v[6:7], v[8:9]
v_fma_f64 v[10:11], -v[8:9], v[8:9], v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_fma_f64 v[6:7], v[10:11], v[6:7], v[8:9]
v_cndmask_b32_e64 v8, 0, 0xffffff80, vcc_lo
v_cmp_class_f64_e64 vcc_lo, v[4:5], 0x260
v_ldexp_f64 v[6:7], v[6:7], v8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_cndmask_b32 v5, v7, v5 :: v_dual_cndmask_b32 v4, v6, v4
v_div_scale_f64 v[6:7], null, s[0:1], s[0:1], v[4:5]
v_div_scale_f64 v[12:13], vcc_lo, v[4:5], s[0:1], v[4:5]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_f64_e32 v[8:9], v[6:7]
s_waitcnt_depctr 0xfff
v_fma_f64 v[10:11], -v[6:7], v[8:9], 1.0
v_fma_f64 v[8:9], v[8:9], v[10:11], v[8:9]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[10:11], -v[6:7], v[8:9], 1.0
v_fma_f64 v[8:9], v[8:9], v[10:11], v[8:9]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[10:11], v[12:13], v[8:9]
v_fma_f64 v[6:7], -v[6:7], v[10:11], v[12:13]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fmas_f64 v[6:7], v[6:7], v[8:9], v[10:11]
v_div_fixup_f64 v[4:5], v[6:7], s[0:1], v[4:5]
v_mov_b32_e32 v6, 1
v_mov_b32_e32 v7, 0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_i32_f64_e32 v4, v[4:5]
v_ashrrev_i32_e32 v5, 31, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[4:5], 3, v[4:5]
v_add_co_u32 v4, vcc_lo, s4, v4
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v5, vcc_lo
v_add_co_u32 v2, vcc_lo, v2, 1
v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
global_atomic_add_u64 v[4:5], v[6:7], off
v_mov_b32_e32 v4, v2
v_ashrrev_i32_e32 v5, 31, v2
v_cmp_le_i64_e32 vcc_lo, s[2:3], v[2:3]
s_or_b32 s8, vcc_lo, s8
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s8
s_cbranch_execnz .LBB0_3
.LBB0_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10PDH_kernelP10hist_entryP8atomdescxd
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 16
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10PDH_kernelP10hist_entryP8atomdescxd, .Lfunc_end0-_Z10PDH_kernelP10hist_entryP8atomdescxd
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z11PDH_kernel2P10hist_entryP8atomdescxd
.globl _Z11PDH_kernel2P10hist_entryP8atomdescxd
.p2align 8
.type _Z11PDH_kernel2P10hist_entryP8atomdescxd,@function
_Z11PDH_kernel2P10hist_entryP8atomdescxd:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z11PDH_kernel2P10hist_entryP8atomdescxd
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 32
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z11PDH_kernel2P10hist_entryP8atomdescxd, .Lfunc_end1-_Z11PDH_kernel2P10hist_entryP8atomdescxd
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 8
.value_kind: by_value
- .offset: 24
.size: 8
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10PDH_kernelP10hist_entryP8atomdescxd
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10PDH_kernelP10hist_entryP8atomdescxd.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 16
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 8
.value_kind: by_value
- .offset: 24
.size: 8
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 32
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z11PDH_kernel2P10hist_entryP8atomdescxd
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z11PDH_kernel2P10hist_entryP8atomdescxd.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0015a26d_00000000-6_proj1-tanmay.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2065:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2065:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z12p2p_distanceii
.type _Z12p2p_distanceii, @function
_Z12p2p_distanceii:
.LFB2057:
.cfi_startproc
endbr64
movq atom_list(%rip), %rax
movslq %edi, %rdi
leaq (%rdi,%rdi,2), %rdx
leaq (%rax,%rdx,8), %rdx
movslq %esi, %rsi
leaq (%rsi,%rsi,2), %rcx
leaq (%rax,%rcx,8), %rax
movsd (%rdx), %xmm1
subsd (%rax), %xmm1
movsd 8(%rdx), %xmm2
subsd 8(%rax), %xmm2
movsd 16(%rdx), %xmm0
subsd 16(%rax), %xmm0
mulsd %xmm1, %xmm1
mulsd %xmm2, %xmm2
addsd %xmm2, %xmm1
mulsd %xmm0, %xmm0
addsd %xmm1, %xmm0
sqrtsd %xmm0, %xmm0
ret
.cfi_endproc
.LFE2057:
.size _Z12p2p_distanceii, .-_Z12p2p_distanceii
.globl _Z12PDH_baselinev
.type _Z12PDH_baselinev, @function
_Z12PDH_baselinev:
.LFB2058:
.cfi_startproc
endbr64
cmpq $0, PDH_acnt(%rip)
jle .L11
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
movl $1, %r13d
movl $0, %r14d
.L7:
movl %r14d, %r12d
movl %r13d, %ebx
cmpq %r13, PDH_acnt(%rip)
jle .L5
movq %r13, %rbp
.L6:
movl %ebx, %esi
movl %r12d, %edi
call _Z12p2p_distanceii
divsd PDH_res(%rip), %xmm0
cvttsd2sil %xmm0, %eax
cltq
movq histogram(%rip), %rdx
addq $1, (%rdx,%rax,8)
addl $1, %ebx
movq PDH_acnt(%rip), %rax
addq $1, %rbp
cmpq %rbp, %rax
jg .L6
addq $1, %r14
addq $1, %r13
cmpq %r14, %rax
jg .L7
.L5:
movl $0, %eax
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
.cfi_restore 14
movl $0, %eax
ret
.cfi_endproc
.LFE2058:
.size _Z12PDH_baselinev, .-_Z12PDH_baselinev
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "Running time for CPU version: %ld.%06ld\n"
.text
.globl _Z19report_running_timev
.type _Z19report_running_timev, @function
_Z19report_running_timev:
.LFB2059:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
leaq Idunno(%rip), %rsi
leaq endTime(%rip), %rdi
call gettimeofday@PLT
movq endTime(%rip), %rbp
subq startTime(%rip), %rbp
movq 8+endTime(%rip), %rbx
subq 8+startTime(%rip), %rbx
js .L17
.L15:
movq %rbx, %rcx
movq %rbp, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
pxor %xmm0, %xmm0
cvtsi2sdq %rbx, %xmm0
divsd .LC1(%rip), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq %rbp, %xmm1
addsd %xmm1, %xmm0
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
subq $1, %rbp
addq $1000000, %rbx
jmp .L15
.cfi_endproc
.LFE2059:
.size _Z19report_running_timev, .-_Z19report_running_timev
.section .rodata.str1.8
.align 8
.LC2:
.string "Running time for GPU version: %ld.%06ld\n"
.text
.globl _Z23report_running_time_GPUv
.type _Z23report_running_time_GPUv, @function
_Z23report_running_time_GPUv:
.LFB2060:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
leaq Idunno(%rip), %rsi
leaq endTime(%rip), %rdi
call gettimeofday@PLT
movq endTime(%rip), %rbp
subq startTime(%rip), %rbp
movq 8+endTime(%rip), %rbx
subq 8+startTime(%rip), %rbx
js .L21
.L19:
movq %rbx, %rcx
movq %rbp, %rdx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
pxor %xmm0, %xmm0
cvtsi2sdq %rbx, %xmm0
divsd .LC1(%rip), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq %rbp, %xmm1
addsd %xmm1, %xmm0
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L21:
.cfi_restore_state
subq $1, %rbp
addq $1000000, %rbx
jmp .L19
.cfi_endproc
.LFE2060:
.size _Z23report_running_time_GPUv, .-_Z23report_running_time_GPUv
.section .rodata.str1.1,"aMS",@progbits,1
.LC3:
.string "\n%02d: "
.LC4:
.string "%15lld "
.LC5:
.string "\n T:%lld \n"
.LC6:
.string "| "
.text
.globl _Z16output_histogramP10hist_entry
.type _Z16output_histogramP10hist_entry, @function
_Z16output_histogramP10hist_entry:
.LFB2061:
.cfi_startproc
endbr64
cmpl $0, num_buckets(%rip)
jle .L30
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $8, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %rbp
movl $0, %ebx
movl $0, %r12d
leaq .LC4(%rip), %r13
leaq .LC6(%rip), %r14
leaq .LC5(%rip), %r15
jmp .L27
.L34:
movl %ebx, %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L24
.L35:
movq %r15, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L26:
addq $1, %rbx
cmpl %ebx, num_buckets(%rip)
jle .L33
.L27:
movslq %ebx, %rax
imulq $1717986919, %rax, %rax
sarq $33, %rax
movl %ebx, %edx
sarl $31, %edx
subl %edx, %eax
leal (%rax,%rax,4), %eax
cmpl %ebx, %eax
je .L34
.L24:
movq 0(%rbp,%rbx,8), %rdx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %r12, %rdx
addq 0(%rbp,%rbx,8), %rdx
movq %rdx, %r12
movl num_buckets(%rip), %eax
subl $1, %eax
cmpl %ebx, %eax
je .L35
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L26
.L33:
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L30:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
.cfi_restore 14
.cfi_restore 15
ret
.cfi_endproc
.LFE2061:
.size _Z16output_histogramP10hist_entry, .-_Z16output_histogramP10hist_entry
.globl _Z53__device_stub__Z10PDH_kernelP10hist_entryP8atomdescxdP10hist_entryP8atomdescxd
.type _Z53__device_stub__Z10PDH_kernelP10hist_entryP8atomdescxdP10hist_entryP8atomdescxd, @function
_Z53__device_stub__Z10PDH_kernelP10hist_entryP8atomdescxdP10hist_entryP8atomdescxd:
.LFB2087:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movsd %xmm0, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L40
.L36:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L41
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L40:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10PDH_kernelP10hist_entryP8atomdescxd(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L36
.L41:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2087:
.size _Z53__device_stub__Z10PDH_kernelP10hist_entryP8atomdescxdP10hist_entryP8atomdescxd, .-_Z53__device_stub__Z10PDH_kernelP10hist_entryP8atomdescxdP10hist_entryP8atomdescxd
.globl _Z10PDH_kernelP10hist_entryP8atomdescxd
.type _Z10PDH_kernelP10hist_entryP8atomdescxd, @function
_Z10PDH_kernelP10hist_entryP8atomdescxd:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z53__device_stub__Z10PDH_kernelP10hist_entryP8atomdescxdP10hist_entryP8atomdescxd
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _Z10PDH_kernelP10hist_entryP8atomdescxd, .-_Z10PDH_kernelP10hist_entryP8atomdescxd
.section .rodata.str1.1
.LC10:
.string "Starting CPU...\n"
.LC11:
.string "Starting GPU...\n"
.LC16:
.string "Difference: \n"
.text
.globl main
.type main, @function
main:
.LFB2062:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $40, %rsp
.cfi_def_cfa_offset 64
movq %rsi, %rbx
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
cltq
movq %rax, PDH_acnt(%rip)
movq 16(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, PDH_res(%rip)
movsd .LC7(%rip), %xmm1
divsd %xmm0, %xmm1
cvttsd2sil %xmm1, %edi
addl $1, %edi
movl %edi, num_buckets(%rip)
movslq %edi, %rdi
salq $3, %rdi
call malloc@PLT
movq %rax, histogram(%rip)
movq PDH_acnt(%rip), %rax
leaq (%rax,%rax,2), %rdi
salq $3, %rdi
call malloc@PLT
movq %rax, atom_list(%rip)
movl $1, %edi
call srand@PLT
cmpq $0, PDH_acnt(%rip)
jle .L45
movl $0, %ebx
movl $0, %ebp
.L46:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
divsd .LC8(%rip), %xmm0
mulsd .LC9(%rip), %xmm0
movq atom_list(%rip), %rax
movsd %xmm0, (%rax,%rbx)
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
divsd .LC8(%rip), %xmm0
mulsd .LC9(%rip), %xmm0
movq atom_list(%rip), %rax
movsd %xmm0, 8(%rax,%rbx)
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
divsd .LC8(%rip), %xmm0
mulsd .LC9(%rip), %xmm0
movq atom_list(%rip), %rax
movsd %xmm0, 16(%rax,%rbx)
addq $1, %rbp
addq $24, %rbx
cmpq %rbp, PDH_acnt(%rip)
jg .L46
.L45:
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq Idunno(%rip), %rbp
movq %rbp, %rsi
leaq startTime(%rip), %rbx
movq %rbx, %rdi
call gettimeofday@PLT
call _Z12PDH_baselinev
call _Z19report_running_timev
movq histogram(%rip), %rdi
call _Z16output_histogramP10hist_entry
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movslq num_buckets(%rip), %rdi
salq $3, %rdi
call malloc@PLT
movq %rax, h_gpu_histogram(%rip)
movq PDH_acnt(%rip), %rax
leaq (%rax,%rax,2), %rsi
salq $3, %rsi
leaq d_atom_list(%rip), %rdi
call cudaMalloc@PLT
movq PDH_acnt(%rip), %rax
leaq (%rax,%rax,2), %rdx
salq $3, %rdx
movl $1, %ecx
movq atom_list(%rip), %rsi
movq d_atom_list(%rip), %rdi
call cudaMemcpy@PLT
movslq num_buckets(%rip), %rsi
salq $3, %rsi
leaq d_gpu_histogram(%rip), %rdi
call cudaMalloc@PLT
movslq num_buckets(%rip), %rdx
salq $3, %rdx
movl $1, %ecx
movq h_gpu_histogram(%rip), %rsi
movq d_gpu_histogram(%rip), %rdi
call cudaMemcpy@PLT
movq %rbp, %rsi
movq %rbx, %rdi
call gettimeofday@PLT
movl $256, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
pxor %xmm0, %xmm0
cvtsi2sdq PDH_acnt(%rip), %xmm0
mulsd .LC12(%rip), %xmm0
movapd %xmm0, %xmm3
movsd .LC17(%rip), %xmm2
movapd %xmm0, %xmm1
andpd %xmm2, %xmm1
movsd .LC13(%rip), %xmm4
ucomisd %xmm1, %xmm4
jbe .L47
cvttsd2siq %xmm0, %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
cmpnlesd %xmm1, %xmm3
movsd .LC15(%rip), %xmm4
andpd %xmm4, %xmm3
addsd %xmm1, %xmm3
andnpd %xmm0, %xmm2
orpd %xmm2, %xmm3
.L47:
cvttsd2siq %xmm3, %rax
movl %eax, 8(%rsp)
movl $1, 12(%rsp)
movl $1, 16(%rsp)
movl 28(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L54
.L48:
movslq num_buckets(%rip), %rdx
salq $3, %rdx
movl $2, %ecx
movq d_gpu_histogram(%rip), %rsi
movq h_gpu_histogram(%rip), %rdi
call cudaMemcpy@PLT
call _Z23report_running_time_GPUv
movq h_gpu_histogram(%rip), %rdi
call _Z16output_histogramP10hist_entry
leaq .LC16(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl num_buckets(%rip), %ebp
movslq %ebp, %rbx
salq $3, %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, diff_histogram(%rip)
testl %ebp, %ebp
jle .L49
movl $0, %eax
.L50:
movq histogram(%rip), %rdx
movq (%rdx,%rax), %rdx
movq h_gpu_histogram(%rip), %rcx
subq (%rcx,%rax), %rdx
movq diff_histogram(%rip), %rcx
movq %rdx, (%rcx,%rax)
addq $8, %rax
cmpq %rax, %rbx
jne .L50
.L49:
movq diff_histogram(%rip), %rdi
call _Z16output_histogramP10hist_entry
movq d_gpu_histogram(%rip), %rdi
call cudaFree@PLT
movq d_atom_list(%rip), %rdi
call cudaFree@PLT
movq histogram(%rip), %rdi
call free@PLT
movq atom_list(%rip), %rdi
call free@PLT
movq h_gpu_histogram(%rip), %rdi
call free@PLT
movq diff_histogram(%rip), %rdi
call free@PLT
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L54:
.cfi_restore_state
movsd PDH_res(%rip), %xmm0
movq PDH_acnt(%rip), %rdx
movq d_atom_list(%rip), %rsi
movq d_gpu_histogram(%rip), %rdi
call _Z53__device_stub__Z10PDH_kernelP10hist_entryP8atomdescxdP10hist_entryP8atomdescxd
jmp .L48
.cfi_endproc
.LFE2062:
.size main, .-main
.globl _Z54__device_stub__Z11PDH_kernel2P10hist_entryP8atomdescxdP10hist_entryP8atomdescxd
.type _Z54__device_stub__Z11PDH_kernel2P10hist_entryP8atomdescxdP10hist_entryP8atomdescxd, @function
_Z54__device_stub__Z11PDH_kernel2P10hist_entryP8atomdescxdP10hist_entryP8atomdescxd:
.LFB2089:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movsd %xmm0, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L59
.L55:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L60
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L59:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z11PDH_kernel2P10hist_entryP8atomdescxd(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L55
.L60:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2089:
.size _Z54__device_stub__Z11PDH_kernel2P10hist_entryP8atomdescxdP10hist_entryP8atomdescxd, .-_Z54__device_stub__Z11PDH_kernel2P10hist_entryP8atomdescxdP10hist_entryP8atomdescxd
.globl _Z11PDH_kernel2P10hist_entryP8atomdescxd
.type _Z11PDH_kernel2P10hist_entryP8atomdescxd, @function
_Z11PDH_kernel2P10hist_entryP8atomdescxd:
.LFB2090:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z54__device_stub__Z11PDH_kernel2P10hist_entryP8atomdescxdP10hist_entryP8atomdescxd
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2090:
.size _Z11PDH_kernel2P10hist_entryP8atomdescxd, .-_Z11PDH_kernel2P10hist_entryP8atomdescxd
.section .rodata.str1.8
.align 8
.LC18:
.string "_Z11PDH_kernel2P10hist_entryP8atomdescxd"
.align 8
.LC19:
.string "_Z10PDH_kernelP10hist_entryP8atomdescxd"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2092:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC18(%rip), %rdx
movq %rdx, %rcx
leaq _Z11PDH_kernel2P10hist_entryP8atomdescxd(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC19(%rip), %rdx
movq %rdx, %rcx
leaq _Z10PDH_kernelP10hist_entryP8atomdescxd(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2092:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl endTime
.bss
.align 16
.type endTime, @object
.size endTime, 16
endTime:
.zero 16
.globl startTime
.align 16
.type startTime, @object
.size startTime, 16
startTime:
.zero 16
.globl Idunno
.align 8
.type Idunno, @object
.size Idunno, 8
Idunno:
.zero 8
.globl d_atom_list
.align 8
.type d_atom_list, @object
.size d_atom_list, 8
d_atom_list:
.zero 8
.globl atom_list
.align 8
.type atom_list, @object
.size atom_list, 8
atom_list:
.zero 8
.globl PDH_res
.align 8
.type PDH_res, @object
.size PDH_res, 8
PDH_res:
.zero 8
.globl num_buckets
.align 4
.type num_buckets, @object
.size num_buckets, 4
num_buckets:
.zero 4
.globl PDH_acnt
.align 8
.type PDH_acnt, @object
.size PDH_acnt, 8
PDH_acnt:
.zero 8
.globl diff_histogram
.align 8
.type diff_histogram, @object
.size diff_histogram, 8
diff_histogram:
.zero 8
.globl d_gpu_histogram
.align 8
.type d_gpu_histogram, @object
.size d_gpu_histogram, 8
d_gpu_histogram:
.zero 8
.globl h_gpu_histogram
.align 8
.type h_gpu_histogram, @object
.size h_gpu_histogram, 8
h_gpu_histogram:
.zero 8
.globl histogram
.align 8
.type histogram, @object
.size histogram, 8
histogram:
.zero 8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC1:
.long 0
.long 1093567616
.align 8
.LC7:
.long 0
.long 1088648064
.align 8
.LC8:
.long -4194304
.long 1105199103
.align 8
.LC9:
.long 0
.long 1087796736
.align 8
.LC12:
.long 0
.long 1064304640
.align 8
.LC13:
.long 0
.long 1127219200
.align 8
.LC15:
.long 0
.long 1072693248
.align 8
.LC17:
.long -1
.long 2147483647
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "proj1-tanmay.hip"
.globl _Z12p2p_distanceii # -- Begin function _Z12p2p_distanceii
.p2align 4, 0x90
.type _Z12p2p_distanceii,@function
_Z12p2p_distanceii: # @_Z12p2p_distanceii
.cfi_startproc
# %bb.0:
movq atom_list(%rip), %rax
movslq %edi, %rcx
leaq (%rcx,%rcx,2), %rcx
movslq %esi, %rdx
leaq (%rdx,%rdx,2), %rdx
movsd (%rax,%rcx,8), %xmm1 # xmm1 = mem[0],zero
movsd 8(%rax,%rcx,8), %xmm2 # xmm2 = mem[0],zero
movsd 16(%rax,%rcx,8), %xmm0 # xmm0 = mem[0],zero
subsd (%rax,%rdx,8), %xmm1
mulsd %xmm1, %xmm1
subsd 8(%rax,%rdx,8), %xmm2
mulsd %xmm2, %xmm2
addsd %xmm1, %xmm2
subsd 16(%rax,%rdx,8), %xmm0
mulsd %xmm0, %xmm0
addsd %xmm2, %xmm0
xorpd %xmm1, %xmm1
ucomisd %xmm1, %xmm0
jb sqrt # TAILCALL
# %bb.1: # %.split
sqrtsd %xmm0, %xmm0
retq
.Lfunc_end0:
.size _Z12p2p_distanceii, .Lfunc_end0-_Z12p2p_distanceii
.cfi_endproc
# -- End function
.globl _Z12PDH_baselinev # -- Begin function _Z12PDH_baselinev
.p2align 4, 0x90
.type _Z12PDH_baselinev,@function
_Z12PDH_baselinev: # @_Z12PDH_baselinev
.cfi_startproc
# %bb.0:
cmpq $0, PDH_acnt(%rip)
jle .LBB1_10
# %bb.1: # %.lr.ph11.preheader
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
pushq %rax
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $1, %ebx
movl $40, %r14d
xorl %r15d, %r15d
xorpd %xmm3, %xmm3
jmp .LBB1_3
.p2align 4, 0x90
.LBB1_2: # %.loopexit
# in Loop: Header=BB1_3 Depth=1
incq %rbx
addq $24, %r14
cmpq %r15, PDH_acnt(%rip)
jle .LBB1_9
.LBB1_3: # %.lr.ph11
# =>This Loop Header: Depth=1
# Child Loop BB1_5 Depth 2
movq %r15, %rax
incq %r15
cmpq %r15, PDH_acnt(%rip)
jle .LBB1_2
# %bb.4: # %.lr.ph.preheader
# in Loop: Header=BB1_3 Depth=1
leaq (%rax,%rax,2), %r12
movq %r14, %r13
movq %rbx, %rbp
jmp .LBB1_5
.p2align 4, 0x90
.LBB1_7: # %call.sqrt
# in Loop: Header=BB1_5 Depth=2
callq sqrt
xorpd %xmm3, %xmm3
.LBB1_8: # %.lr.ph.split
# in Loop: Header=BB1_5 Depth=2
divsd PDH_res(%rip), %xmm0
cvttsd2si %xmm0, %eax
movq histogram(%rip), %rcx
cltq
incq (%rcx,%rax,8)
incq %rbp
addq $24, %r13
cmpq %rbp, PDH_acnt(%rip)
jle .LBB1_2
.LBB1_5: # %.lr.ph
# Parent Loop BB1_3 Depth=1
# => This Inner Loop Header: Depth=2
movq atom_list(%rip), %rax
movsd (%rax,%r12,8), %xmm1 # xmm1 = mem[0],zero
movsd 8(%rax,%r12,8), %xmm2 # xmm2 = mem[0],zero
subsd -16(%rax,%r13), %xmm1
movsd 16(%rax,%r12,8), %xmm0 # xmm0 = mem[0],zero
mulsd %xmm1, %xmm1
subsd -8(%rax,%r13), %xmm2
mulsd %xmm2, %xmm2
addsd %xmm1, %xmm2
subsd (%rax,%r13), %xmm0
mulsd %xmm0, %xmm0
addsd %xmm2, %xmm0
ucomisd %xmm3, %xmm0
jb .LBB1_7
# %bb.6: # in Loop: Header=BB1_5 Depth=2
sqrtsd %xmm0, %xmm0
jmp .LBB1_8
.LBB1_9:
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r12
.cfi_restore %r13
.cfi_restore %r14
.cfi_restore %r15
.cfi_restore %rbp
.LBB1_10: # %._crit_edge
xorl %eax, %eax
retq
.Lfunc_end1:
.size _Z12PDH_baselinev, .Lfunc_end1-_Z12PDH_baselinev
.cfi_endproc
# -- End function
.globl _Z25__device_stub__PDH_kernelP10hist_entryP8atomdescxd # -- Begin function _Z25__device_stub__PDH_kernelP10hist_entryP8atomdescxd
.p2align 4, 0x90
.type _Z25__device_stub__PDH_kernelP10hist_entryP8atomdescxd,@function
_Z25__device_stub__PDH_kernelP10hist_entryP8atomdescxd: # @_Z25__device_stub__PDH_kernelP10hist_entryP8atomdescxd
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movsd %xmm0, 48(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rax
movq %rax, 104(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10PDH_kernelP10hist_entryP8atomdescxd, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size _Z25__device_stub__PDH_kernelP10hist_entryP8atomdescxd, .Lfunc_end2-_Z25__device_stub__PDH_kernelP10hist_entryP8atomdescxd
.cfi_endproc
# -- End function
.globl _Z26__device_stub__PDH_kernel2P10hist_entryP8atomdescxd # -- Begin function _Z26__device_stub__PDH_kernel2P10hist_entryP8atomdescxd
.p2align 4, 0x90
.type _Z26__device_stub__PDH_kernel2P10hist_entryP8atomdescxd,@function
_Z26__device_stub__PDH_kernel2P10hist_entryP8atomdescxd: # @_Z26__device_stub__PDH_kernel2P10hist_entryP8atomdescxd
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movsd %xmm0, 48(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rax
movq %rax, 104(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z11PDH_kernel2P10hist_entryP8atomdescxd, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end3:
.size _Z26__device_stub__PDH_kernel2P10hist_entryP8atomdescxd, .Lfunc_end3-_Z26__device_stub__PDH_kernel2P10hist_entryP8atomdescxd
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z19report_running_timev
.LCPI4_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl _Z19report_running_timev
.p2align 4, 0x90
.type _Z19report_running_timev,@function
_Z19report_running_timev: # @_Z19report_running_timev
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
pushq %rax
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movl $endTime, %edi
movl $Idunno, %esi
callq gettimeofday
movq endTime(%rip), %rax
subq startTime(%rip), %rax
movq endTime+8(%rip), %rcx
subq startTime+8(%rip), %rcx
leaq 1000000(%rcx), %rbx
movq %rcx, %r14
sarq $63, %r14
addq %rax, %r14
testq %rcx, %rcx
cmovnsq %rcx, %rbx
movl $.L.str, %edi
movq %r14, %rsi
movq %rbx, %rdx
xorl %eax, %eax
callq printf
cvtsi2sd %r14, %xmm1
cvtsi2sd %rbx, %xmm0
divsd .LCPI4_0(%rip), %xmm0
addsd %xmm1, %xmm0
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size _Z19report_running_timev, .Lfunc_end4-_Z19report_running_timev
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z23report_running_time_GPUv
.LCPI5_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl _Z23report_running_time_GPUv
.p2align 4, 0x90
.type _Z23report_running_time_GPUv,@function
_Z23report_running_time_GPUv: # @_Z23report_running_time_GPUv
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
pushq %rax
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movl $endTime, %edi
movl $Idunno, %esi
callq gettimeofday
movq endTime(%rip), %rax
subq startTime(%rip), %rax
movq endTime+8(%rip), %rcx
subq startTime+8(%rip), %rcx
leaq 1000000(%rcx), %rbx
movq %rcx, %r14
sarq $63, %r14
addq %rax, %r14
testq %rcx, %rcx
cmovnsq %rcx, %rbx
movl $.L.str.1, %edi
movq %r14, %rsi
movq %rbx, %rdx
xorl %eax, %eax
callq printf
cvtsi2sd %r14, %xmm1
cvtsi2sd %rbx, %xmm0
divsd .LCPI5_0(%rip), %xmm0
addsd %xmm1, %xmm0
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size _Z23report_running_time_GPUv, .Lfunc_end5-_Z23report_running_time_GPUv
.cfi_endproc
# -- End function
.globl _Z16output_histogramP10hist_entry # -- Begin function _Z16output_histogramP10hist_entry
.p2align 4, 0x90
.type _Z16output_histogramP10hist_entry,@function
_Z16output_histogramP10hist_entry: # @_Z16output_histogramP10hist_entry
.cfi_startproc
# %bb.0:
cmpl $0, num_buckets(%rip)
jle .LBB6_9
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl $3435973837, %r12d # imm = 0xCCCCCCCD
xorl %r14d, %r14d
xorl %r15d, %r15d
jmp .LBB6_2
.p2align 4, 0x90
.LBB6_5: # in Loop: Header=BB6_2 Depth=1
movl $.L.str.4, %edi
movq %r15, %rsi
xorl %eax, %eax
callq printf
.LBB6_7: # in Loop: Header=BB6_2 Depth=1
incq %r14
movslq num_buckets(%rip), %rax
cmpq %rax, %r14
jge .LBB6_8
.LBB6_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl %r14d, %eax
imulq %r12, %rax
shrq $34, %rax
leal (%rax,%rax,4), %eax
cmpl %eax, %r14d
jne .LBB6_4
# %bb.3: # in Loop: Header=BB6_2 Depth=1
movl $.L.str.2, %edi
movl %r14d, %esi
xorl %eax, %eax
callq printf
.LBB6_4: # in Loop: Header=BB6_2 Depth=1
movq (%rbx,%r14,8), %rsi
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
addq (%rbx,%r14,8), %r15
movl num_buckets(%rip), %eax
decl %eax
cmpq %rax, %r14
je .LBB6_5
# %bb.6: # in Loop: Header=BB6_2 Depth=1
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
jmp .LBB6_7
.LBB6_8:
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r12
.cfi_restore %r14
.cfi_restore %r15
.LBB6_9: # %._crit_edge
retq
.Lfunc_end6:
.size _Z16output_histogramP10hist_entry, .Lfunc_end6-_Z16output_histogramP10hist_entry
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI7_0:
.quad 0x40e3738000000000 # double 39836
.LCPI7_1:
.quad 0x41dfffffffc00000 # double 2147483647
.LCPI7_2:
.quad 0x40d6760000000000 # double 23000
.LCPI7_3:
.quad 0x3f70000000000000 # double 0.00390625
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $120, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
movq 8(%rsi), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
cltq
movq %rax, PDH_acnt(%rip)
movq 16(%rbx), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, PDH_res(%rip)
movsd .LCPI7_0(%rip), %xmm1 # xmm1 = mem[0],zero
divsd %xmm0, %xmm1
cvttsd2si %xmm1, %eax
leal 1(%rax), %ecx
movl %ecx, num_buckets(%rip)
cltq
leaq 8(,%rax,8), %rdi
callq malloc
movq %rax, histogram(%rip)
movq PDH_acnt(%rip), %rax
shlq $3, %rax
leaq (%rax,%rax,2), %rdi
callq malloc
movq %rax, atom_list(%rip)
movl $1, %edi
callq srand
cmpq $0, PDH_acnt(%rip)
jle .LBB7_3
# %bb.1: # %.lr.ph.preheader
xorl %ebx, %ebx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB7_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
movsd .LCPI7_1(%rip), %xmm1 # xmm1 = mem[0],zero
divsd %xmm1, %xmm0
movsd .LCPI7_2(%rip), %xmm1 # xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
movq atom_list(%rip), %rax
movsd %xmm0, (%rax,%rbx)
callq rand
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
divsd .LCPI7_1(%rip), %xmm0
mulsd .LCPI7_2(%rip), %xmm0
movq atom_list(%rip), %rax
movsd %xmm0, 8(%rax,%rbx)
callq rand
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
divsd .LCPI7_1(%rip), %xmm0
mulsd .LCPI7_2(%rip), %xmm0
movq atom_list(%rip), %rax
movsd %xmm0, 16(%rax,%rbx)
incq %r14
addq $24, %rbx
cmpq %r14, PDH_acnt(%rip)
jg .LBB7_2
.LBB7_3: # %._crit_edge
movl $.Lstr, %edi
callq puts@PLT
movl $startTime, %edi
movl $Idunno, %esi
callq gettimeofday
cmpq $0, PDH_acnt(%rip)
jle .LBB7_12
# %bb.4: # %.lr.ph11.i.preheader
movl $1, %ebx
movl $40, %r14d
xorl %r15d, %r15d
xorpd %xmm3, %xmm3
jmp .LBB7_6
.p2align 4, 0x90
.LBB7_5: # %.loopexit.i
# in Loop: Header=BB7_6 Depth=1
incq %rbx
addq $24, %r14
cmpq %r15, PDH_acnt(%rip)
jle .LBB7_12
.LBB7_6: # %.lr.ph11.i
# =>This Loop Header: Depth=1
# Child Loop BB7_8 Depth 2
movq %r15, %rax
incq %r15
cmpq %r15, PDH_acnt(%rip)
jle .LBB7_5
# %bb.7: # %.lr.ph.i.preheader
# in Loop: Header=BB7_6 Depth=1
leaq (%rax,%rax,2), %r12
movq %r14, %r13
movq %rbx, %rbp
jmp .LBB7_8
.p2align 4, 0x90
.LBB7_10: # %call.sqrt
# in Loop: Header=BB7_8 Depth=2
callq sqrt
xorpd %xmm3, %xmm3
.LBB7_11: # %.lr.ph.i.split
# in Loop: Header=BB7_8 Depth=2
divsd PDH_res(%rip), %xmm0
cvttsd2si %xmm0, %eax
movq histogram(%rip), %rcx
cltq
incq (%rcx,%rax,8)
incq %rbp
addq $24, %r13
cmpq %rbp, PDH_acnt(%rip)
jle .LBB7_5
.LBB7_8: # %.lr.ph.i
# Parent Loop BB7_6 Depth=1
# => This Inner Loop Header: Depth=2
movq atom_list(%rip), %rax
movsd (%rax,%r12,8), %xmm1 # xmm1 = mem[0],zero
movsd 8(%rax,%r12,8), %xmm2 # xmm2 = mem[0],zero
subsd -16(%rax,%r13), %xmm1
movsd 16(%rax,%r12,8), %xmm0 # xmm0 = mem[0],zero
mulsd %xmm1, %xmm1
subsd -8(%rax,%r13), %xmm2
mulsd %xmm2, %xmm2
addsd %xmm1, %xmm2
subsd (%rax,%r13), %xmm0
mulsd %xmm0, %xmm0
addsd %xmm2, %xmm0
ucomisd %xmm3, %xmm0
jb .LBB7_10
# %bb.9: # in Loop: Header=BB7_8 Depth=2
sqrtsd %xmm0, %xmm0
jmp .LBB7_11
.LBB7_12: # %_Z12PDH_baselinev.exit
movl $endTime, %edi
movl $Idunno, %esi
callq gettimeofday
movq endTime(%rip), %rax
subq startTime(%rip), %rax
movq endTime+8(%rip), %rcx
subq startTime+8(%rip), %rcx
leaq 1000000(%rcx), %rdx
movq %rcx, %rsi
sarq $63, %rsi
addq %rax, %rsi
testq %rcx, %rcx
cmovnsq %rcx, %rdx
movl $.L.str, %edi
xorl %eax, %eax
callq printf
cmpl $0, num_buckets(%rip)
jle .LBB7_20
# %bb.13: # %.lr.ph.i17.preheader
movq histogram(%rip), %r15
movl $3435973837, %r12d # imm = 0xCCCCCCCD
xorl %ebx, %ebx
xorl %r14d, %r14d
jmp .LBB7_14
.p2align 4, 0x90
.LBB7_18: # in Loop: Header=BB7_14 Depth=1
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
.LBB7_19: # in Loop: Header=BB7_14 Depth=1
incq %rbx
movslq num_buckets(%rip), %rax
cmpq %rax, %rbx
jge .LBB7_20
.LBB7_14: # %.lr.ph.i17
# =>This Inner Loop Header: Depth=1
movl %ebx, %eax
imulq %r12, %rax
shrq $34, %rax
leal (%rax,%rax,4), %eax
cmpl %eax, %ebx
jne .LBB7_16
# %bb.15: # in Loop: Header=BB7_14 Depth=1
movl $.L.str.2, %edi
movl %ebx, %esi
xorl %eax, %eax
callq printf
.LBB7_16: # in Loop: Header=BB7_14 Depth=1
movq (%r15,%rbx,8), %rsi
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
addq (%r15,%rbx,8), %r14
movl num_buckets(%rip), %eax
decl %eax
cmpq %rax, %rbx
jne .LBB7_18
# %bb.17: # in Loop: Header=BB7_14 Depth=1
movl $.L.str.4, %edi
movq %r14, %rsi
xorl %eax, %eax
callq printf
jmp .LBB7_19
.LBB7_20: # %_Z16output_histogramP10hist_entry.exit
movl $.Lstr.1, %edi
callq puts@PLT
movslq num_buckets(%rip), %rdi
shlq $3, %rdi
callq malloc
movq %rax, h_gpu_histogram(%rip)
movq PDH_acnt(%rip), %rax
shlq $3, %rax
leaq (%rax,%rax,2), %rsi
movl $d_atom_list, %edi
callq hipMalloc
movq d_atom_list(%rip), %rdi
movq atom_list(%rip), %rsi
movq PDH_acnt(%rip), %rax
shlq $3, %rax
leaq (%rax,%rax,2), %rdx
movl $1, %ecx
callq hipMemcpy
movslq num_buckets(%rip), %rsi
shlq $3, %rsi
movl $d_gpu_histogram, %edi
callq hipMalloc
movq d_gpu_histogram(%rip), %rdi
movq h_gpu_histogram(%rip), %rsi
movslq num_buckets(%rip), %rdx
shlq $3, %rdx
movl $1, %ecx
callq hipMemcpy
movl $startTime, %edi
movl $Idunno, %esi
callq gettimeofday
xorps %xmm0, %xmm0
cvtsi2sdq PDH_acnt(%rip), %xmm0
mulsd .LCPI7_3(%rip), %xmm0
callq ceil@PLT
cvttsd2si %xmm0, %rax
movl %eax, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $256, %rdx # imm = 0x100
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB7_22
# %bb.21:
movq d_gpu_histogram(%rip), %rax
movq d_atom_list(%rip), %rcx
movq PDH_acnt(%rip), %rdx
movsd PDH_res(%rip), %xmm0 # xmm0 = mem[0],zero
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
movq %rdx, 56(%rsp)
movsd %xmm0, 48(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rax
movq %rax, 104(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10PDH_kernelP10hist_entryP8atomdescxd, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB7_22:
movq h_gpu_histogram(%rip), %rdi
movq d_gpu_histogram(%rip), %rsi
movslq num_buckets(%rip), %rdx
shlq $3, %rdx
movl $2, %ecx
callq hipMemcpy
movl $endTime, %edi
movl $Idunno, %esi
callq gettimeofday
movq endTime(%rip), %rax
subq startTime(%rip), %rax
movq endTime+8(%rip), %rcx
subq startTime+8(%rip), %rcx
leaq 1000000(%rcx), %rdx
movq %rcx, %rsi
sarq $63, %rsi
addq %rax, %rsi
testq %rcx, %rcx
cmovnsq %rcx, %rdx
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
cmpl $0, num_buckets(%rip)
jle .LBB7_30
# %bb.23: # %.lr.ph.i23.preheader
movq h_gpu_histogram(%rip), %r15
movl $3435973837, %r12d # imm = 0xCCCCCCCD
xorl %ebx, %ebx
xorl %r14d, %r14d
jmp .LBB7_24
.p2align 4, 0x90
.LBB7_28: # in Loop: Header=BB7_24 Depth=1
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
.LBB7_29: # in Loop: Header=BB7_24 Depth=1
incq %rbx
movslq num_buckets(%rip), %rax
cmpq %rax, %rbx
jge .LBB7_30
.LBB7_24: # %.lr.ph.i23
# =>This Inner Loop Header: Depth=1
movl %ebx, %eax
imulq %r12, %rax
shrq $34, %rax
leal (%rax,%rax,4), %eax
cmpl %eax, %ebx
jne .LBB7_26
# %bb.25: # in Loop: Header=BB7_24 Depth=1
movl $.L.str.2, %edi
movl %ebx, %esi
xorl %eax, %eax
callq printf
.LBB7_26: # in Loop: Header=BB7_24 Depth=1
movq (%r15,%rbx,8), %rsi
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
addq (%r15,%rbx,8), %r14
movl num_buckets(%rip), %eax
decl %eax
cmpq %rax, %rbx
jne .LBB7_28
# %bb.27: # in Loop: Header=BB7_24 Depth=1
movl $.L.str.4, %edi
movq %r14, %rsi
xorl %eax, %eax
callq printf
jmp .LBB7_29
.LBB7_30: # %_Z16output_histogramP10hist_entry.exit27
movl $.Lstr.2, %edi
callq puts@PLT
movslq num_buckets(%rip), %r14
leaq (,%r14,8), %rdi
callq malloc
movq %rax, %rbx
movl %r14d, %eax
movq %rbx, diff_histogram(%rip)
testq %r14, %r14
jle .LBB7_33
# %bb.31: # %.lr.ph39
movq histogram(%rip), %rcx
movq h_gpu_histogram(%rip), %rdx
xorl %esi, %esi
.p2align 4, 0x90
.LBB7_32: # =>This Inner Loop Header: Depth=1
movq (%rcx,%rsi,8), %rdi
subq (%rdx,%rsi,8), %rdi
movq %rdi, (%rbx,%rsi,8)
incq %rsi
cmpq %rsi, %rax
jne .LBB7_32
.LBB7_33: # %._crit_edge40
testl %eax, %eax
jle .LBB7_41
# %bb.34: # %.lr.ph.i28.preheader
movl $3435973837, %r12d # imm = 0xCCCCCCCD
xorl %r14d, %r14d
xorl %r15d, %r15d
jmp .LBB7_35
.p2align 4, 0x90
.LBB7_39: # in Loop: Header=BB7_35 Depth=1
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
.LBB7_40: # in Loop: Header=BB7_35 Depth=1
incq %r14
movslq num_buckets(%rip), %rax
cmpq %rax, %r14
jge .LBB7_41
.LBB7_35: # %.lr.ph.i28
# =>This Inner Loop Header: Depth=1
movl %r14d, %eax
imulq %r12, %rax
shrq $34, %rax
leal (%rax,%rax,4), %eax
cmpl %eax, %r14d
jne .LBB7_37
# %bb.36: # in Loop: Header=BB7_35 Depth=1
movl $.L.str.2, %edi
movl %r14d, %esi
xorl %eax, %eax
callq printf
.LBB7_37: # in Loop: Header=BB7_35 Depth=1
movq (%rbx,%r14,8), %rsi
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
addq (%rbx,%r14,8), %r15
movl num_buckets(%rip), %eax
decl %eax
cmpq %rax, %r14
jne .LBB7_39
# %bb.38: # in Loop: Header=BB7_35 Depth=1
movl $.L.str.4, %edi
movq %r15, %rsi
xorl %eax, %eax
callq printf
jmp .LBB7_40
.LBB7_41: # %_Z16output_histogramP10hist_entry.exit32
movq d_gpu_histogram(%rip), %rdi
callq hipFree
movq d_atom_list(%rip), %rdi
callq hipFree
movq histogram(%rip), %rdi
callq free
movq atom_list(%rip), %rdi
callq free
movq h_gpu_histogram(%rip), %rdi
callq free
movq diff_histogram(%rip), %rdi
callq free
xorl %eax, %eax
addq $120, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end7:
.size main, .Lfunc_end7-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB8_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB8_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10PDH_kernelP10hist_entryP8atomdescxd, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11PDH_kernel2P10hist_entryP8atomdescxd, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end8:
.size __hip_module_ctor, .Lfunc_end8-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB9_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB9_2:
retq
.Lfunc_end9:
.size __hip_module_dtor, .Lfunc_end9-__hip_module_dtor
.cfi_endproc
# -- End function
.type histogram,@object # @histogram
.bss
.globl histogram
.p2align 3, 0x0
histogram:
.quad 0
.size histogram, 8
.type h_gpu_histogram,@object # @h_gpu_histogram
.globl h_gpu_histogram
.p2align 3, 0x0
h_gpu_histogram:
.quad 0
.size h_gpu_histogram, 8
.type d_gpu_histogram,@object # @d_gpu_histogram
.globl d_gpu_histogram
.p2align 3, 0x0
d_gpu_histogram:
.quad 0
.size d_gpu_histogram, 8
.type diff_histogram,@object # @diff_histogram
.globl diff_histogram
.p2align 3, 0x0
diff_histogram:
.quad 0
.size diff_histogram, 8
.type PDH_acnt,@object # @PDH_acnt
.globl PDH_acnt
.p2align 3, 0x0
PDH_acnt:
.quad 0 # 0x0
.size PDH_acnt, 8
.type num_buckets,@object # @num_buckets
.globl num_buckets
.p2align 2, 0x0
num_buckets:
.long 0 # 0x0
.size num_buckets, 4
.type PDH_res,@object # @PDH_res
.globl PDH_res
.p2align 3, 0x0
PDH_res:
.quad 0x0000000000000000 # double 0
.size PDH_res, 8
.type atom_list,@object # @atom_list
.globl atom_list
.p2align 3, 0x0
atom_list:
.quad 0
.size atom_list, 8
.type d_atom_list,@object # @d_atom_list
.globl d_atom_list
.p2align 3, 0x0
d_atom_list:
.quad 0
.size d_atom_list, 8
.type Idunno,@object # @Idunno
.globl Idunno
.p2align 2, 0x0
Idunno:
.zero 8
.size Idunno, 8
.type startTime,@object # @startTime
.globl startTime
.p2align 3, 0x0
startTime:
.zero 16
.size startTime, 16
.type endTime,@object # @endTime
.globl endTime
.p2align 3, 0x0
endTime:
.zero 16
.size endTime, 16
.type _Z10PDH_kernelP10hist_entryP8atomdescxd,@object # @_Z10PDH_kernelP10hist_entryP8atomdescxd
.section .rodata,"a",@progbits
.globl _Z10PDH_kernelP10hist_entryP8atomdescxd
.p2align 3, 0x0
_Z10PDH_kernelP10hist_entryP8atomdescxd:
.quad _Z25__device_stub__PDH_kernelP10hist_entryP8atomdescxd
.size _Z10PDH_kernelP10hist_entryP8atomdescxd, 8
.type _Z11PDH_kernel2P10hist_entryP8atomdescxd,@object # @_Z11PDH_kernel2P10hist_entryP8atomdescxd
.globl _Z11PDH_kernel2P10hist_entryP8atomdescxd
.p2align 3, 0x0
_Z11PDH_kernel2P10hist_entryP8atomdescxd:
.quad _Z26__device_stub__PDH_kernel2P10hist_entryP8atomdescxd
.size _Z11PDH_kernel2P10hist_entryP8atomdescxd, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Running time for CPU version: %ld.%06ld\n"
.size .L.str, 41
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Running time for GPU version: %ld.%06ld\n"
.size .L.str.1, 41
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "\n%02d: "
.size .L.str.2, 8
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "%15lld "
.size .L.str.3, 8
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "\n T:%lld \n"
.size .L.str.4, 11
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "| "
.size .L.str.5, 3
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z10PDH_kernelP10hist_entryP8atomdescxd"
.size .L__unnamed_1, 40
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z11PDH_kernel2P10hist_entryP8atomdescxd"
.size .L__unnamed_2, 41
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Starting CPU..."
.size .Lstr, 16
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Starting GPU..."
.size .Lstr.1, 16
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "Difference: "
.size .Lstr.2, 13
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__PDH_kernelP10hist_entryP8atomdescxd
.addrsig_sym _Z26__device_stub__PDH_kernel2P10hist_entryP8atomdescxd
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym d_gpu_histogram
.addrsig_sym d_atom_list
.addrsig_sym Idunno
.addrsig_sym startTime
.addrsig_sym endTime
.addrsig_sym _Z10PDH_kernelP10hist_entryP8atomdescxd
.addrsig_sym _Z11PDH_kernel2P10hist_entryP8atomdescxd
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | // Author: Noah Van Der Weide
// 3/30/2020
// upscale the image by doubling height and width
// fill in empty areas according to neighboring pixels and difference thresholds
// THREE APPROACHES
// FIRST APPROACH:
// each thread will process one pixel
// SECOND APPROACH:
// each thread will process one original pixel and surrounding pixels
// THIRD APPROACH:
// each thread will process one original pixel and pixels to the right and below
// Two filling approaches:
// First:
// Tackle everything at once.
// Second:
// Stretch out original image and fill in adjacent pixels with original pixel value,
// Then go through and SAXPY if original pixel differences aren't too great.
// dimension of image: upper left = (0,0), bottom right = (width-1, height-1)
// *img_original is the original image
// *img_new width = *img_original width * 3 - 2
// *img_new width = *img_original height * 3 - 2
// 8 bits per color (0 - 255)
// upscale function is called independently for each color.
// this allows it to be faster for black and white images as it only needs to be called once.
// Can therefore also be applied to images which use a different color map than RGB (JPEG, for example).
/*
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <vector>
#include <stdlib.h>
#include <string.h>
#include "upscale.cuh"
#define THREADS_PER_BLOCK 64
//__global__ void upscale_CUDA(unsigned char* dst, unsigned char* src, int src_height, int src_width, int src_channels, int threshold) {
__global__ void upscale_CUDA(unsigned char * dst, unsigned char * src, int src_width, int src_height, int src_channels, unsigned char threshold) {
// not using shared memory right now
// there is 48 KB of shared memory available.
// images are typically more than that, so I'll have to think about how it could be implemented
//extern __shared__ unsigned char pic[];
//int pixel = blockIdx.x * blockdim.x + threadIdx.x;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
// not relevant to code function, but shows how a thread could access a pixel in every channel.
// pixel values are from 0 to 255.
//for (int k = 0; k < channels; k++){
// img[idx + k];
//}
int dst_width = src_width * 3 - 2;
//int dst_height = src_height * 3 - 2;
//long int dst_elements = dst_width * dst_height * src_channels;
//long int src_elements = src_width * src_height * src_channels;
int src_stride = src_width * src_channels;
int dst_stride = dst_width * src_channels;
// if invalid location do nothing.
//if (i >= dst_width || j >= dst_height) // is that width or width-1?
if (i >= src_width || j >= src_height)
return;
// all channels for a pixel are grouped together. To access an adjacent pixel, you must add by the number of channels.
for (int k = 0; k < src_channels; k++) {
int dst_index = (j * 21 + i * 3) + k; // this is strictly for my predefined dst width and height (*3 -2)
int src_index = (j * src_width + i) + k;
// transfer known src values to dst
// to access different channels, the number of elements of the src/dst image must be added to the respective array index.
dst[dst_index] = src[src_index];
// vertical comparison acts on src image and applies values to dst image
int y_diff = src[src_index + src_stride] - src[src_index];
if (y_diff < threshold) { // apply third-average
// linear fill
int step = y_diff / 3;
dst[dst_index + dst_stride] = src[src_index] + step;
dst[dst_index + 2 * dst_stride] = src[src_index] + step * 2;
}
else { // nearest neighbor
dst[dst_index + dst_stride] = src[src_index];
dst[dst_index + 2 * dst_stride] = src[src_index + src_stride];
}
__syncthreads();
// horizontal
// I know this is painfully inefficient.
int x_diff_0 = src[src_index] - src[src_index + src_channels];
int x_diff_1 = dst[dst_index + dst_stride] - dst[dst_index + dst_stride + src_channels];
int x_diff_2 = dst[dst_index + 2 * dst_stride] - dst[dst_index + 2 * dst_stride + src_channels];
int step = 0;
if (x_diff_0 < threshold) { // apply third-average
// linear fill
step = x_diff_0 / 3;
dst[dst_index + 1] = src[src_index] + step;
dst[dst_index + 2] = src[src_index] + step * 2;
}
else { // nearest neighbor
dst[dst_index + src_channels] = src[src_index];
dst[dst_index + 2*src_channels] = src[src_index + src_channels];
}
if (x_diff_1 < threshold) { // apply third-average
// linear fill
step = x_diff_1 / 3;
dst[dst_index + dst_stride + src_channels] = dst[dst_index + dst_stride] + step;
dst[dst_index + dst_stride + 2*src_channels] = dst[dst_index + dst_stride] + step * 2;
}
else { // nearest neighbor
dst[dst_index + dst_stride + src_channels] = dst[dst_index + dst_stride];
dst[dst_index + dst_stride + 2*src_channels] = dst[dst_index + dst_stride + 3];
}
if (x_diff_2 < threshold) { // apply third-average
// linear fill
step = x_diff_2 / 3;
dst[dst_index + 2 * dst_stride + src_channels] = dst[dst_index + 2 * dst_stride] + step;
dst[dst_index + 2 * dst_stride + 2*src_channels] = dst[dst_index + 2 * dst_stride] + step * 2;
}
else { // nearest neighbor
dst[dst_index + 2 * dst_stride + src_channels] = dst[dst_index + 2 * dst_stride];
dst[dst_index + 2 * dst_stride + 2*src_channels] = dst[dst_index + 2 * dst_stride + 3];
}
__syncthreads();
}
__syncthreads();
}
__global__ void upscale(unsigned char threshold) {
// CUDA timing parameters
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float ms;
// file handling using opencv
// IMREAD_COLOR loads image in BGR 8-bit format
// IMREAD_UNCHANGED includes alpha channel
// IMREAD_GRAYSCALE loads as intensity 0-1
// load image
string image_path = samples::findFile("peppers.png");
Mat src = imread(image_path, IMREAD_COLOR);
// check if image loaded properly
if (src.empty()) {
cout << "Could not read image: " << image_path << endl;
return 1;
}
// ------------------------------------------
// properties of the source and upscaled image
// ------------------------------------------
// input dimensions
int src_height = src.rows;
int src_width = src.cols;
// channels (e.g. Red, Green, Blue)
int channels = src.channels();
//int type = src.type; // CV_8UC3?
// output dimensions
int dst_height = src_height * 3 - 2;
int dst_width = src_width * 3 - 2;
// number of elements (if a picture has 3 channels, this is 3 * pixels)
int dst_elements = dst_width * dst_height * channels;
int src_elements = src_width * src_height * channels;
// number of bytes each image will take
int dst_size = dst_elements * sizeof(unsigned char);
int src_size = src_elements * sizeof(unsigned char);
// create new image with same datatype as input
Mat dst(dst_height, dst_width, CV_8UC3, Scalar(0, 0, 0));
//Mat dst(dst_height, dst_width, type);
// image data for upscale function
unsigned char* src_img = src.data;
unsigned char* dst_img = dst.data;
cout << "Loaded " << image_path << " -- " << src_height << ", " << src_width << " -- Channels: " << channels << endl;
// initialize device variables
unsigned char* dev_src, * dev_dst;
// number of blocks to call in kernel. Max threads per block is usually 1024
int blocks = (src_elements + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
// allocate memory in GPU
cudaMalloc((void**)&dev_dst, dst_size);
cudaMalloc((void**)&dev_src, src_size);
// used for shared memory if eventually implemented
//cudaMallocManaged(&dst, dst_size);
//cudaMallocManaged(&src, src_size);
// copy data from CPU to GPU
cudaMemcpy(dev_dst, dst_img, dst_size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_src, src_img, src_size, cudaMemcpyHostToDevice);
// start timer for performance evaluation
cudaEventRecord(start);
// call upscale function
//upscale_CUDA<<<blocks, THREADS_PER_BLOCK>>> (dev_dst, dev_src, src_elements, src_width, src_height, threshold); // <<<blocks, threads per block, shared mem>>>
dim3 grid(src_width, src_height);
upscale_CUDA < < <grid, 1 > > >(dev_dst, dev_src, src_width, src_height, channels, threshold);
cudaDeviceSynchronize();
// end timer
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&ms, start, stop);
// copy data back from GPU to CPU
cudaMemcpy(dst_img, dev_dst, dst_size, cudaMemcpyDeviceToHost);
cudaMemcpy(src_img, dev_src, dst_size, cudaMemcpyDeviceToHost); // might not need this
// free GPU
cudaFree(dev_dst);
cudaFree(dev_src);
// create output image. I might not need another Mat -- just use 'dst' instead of 'output'
//Mat output = Mat(dst_height, dst_width, type, dst);
imshow("source", src);
imshow("output", dst);
imwrite("upscaled_image.png", dst);
waitKey(0);
std::cout << "\ntime (ms) = " << ms << std::endl;
}
/*
void upscale(unsigned char* dst, unsigned char* src, int src_height, int src_width, int src_channels, int threshold){
unsigned char* dev_src, * dev_dst;
int dst_width = src_width * 3 - 2;
int dst_height = src_height * 3 - 2;
int dst_elements = dst_width * dst_height * src_channels;
int src_elements = src_width * src_height * src_channels;
int dst_size = dst_elements * sizeof(unsigned char);
int src_size = src_elements * sizeof(unsigned char);
int blocks = (src_elements + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
// allocate memory in GPU
cudaMalloc((void**)&dev_dst, dst_size);
cudaMalloc((void**)&dev_src, src_size);
//cudaMallocManaged(&dst, dst_size);
//cudaMallocManaged(&src, src_size);
// copy data from CPU to GPU
cudaMemcpy(dev_dst, dst, dst_size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_src, src, src_size, cudaMemcpyHostToDevice);
upscale_CUDA<<<blocks, THREADS_PER_BLOCK>>> (dev_src, src_height, src_width, src_channels, threshold); // <<<blocks, threads per block, shared mem>>>
cudaDeviceSynchronize();
// copy data back from GPU to CPU
cudaMemcpy(dst, dev_dst, dst_size, cudaMemcpyDeviceToHost);
// free GPU
cudaFree(dev_dst);
cudaFree(dev_src);
}
*/ | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | // Author: Noah Van Der Weide
// 3/30/2020
// upscale the image by doubling height and width
// fill in empty areas according to neighboring pixels and difference thresholds
// THREE APPROACHES
// FIRST APPROACH:
// each thread will process one pixel
// SECOND APPROACH:
// each thread will process one original pixel and surrounding pixels
// THIRD APPROACH:
// each thread will process one original pixel and pixels to the right and below
// Two filling approaches:
// First:
// Tackle everything at once.
// Second:
// Stretch out original image and fill in adjacent pixels with original pixel value,
// Then go through and SAXPY if original pixel differences aren't too great.
// dimension of image: upper left = (0,0), bottom right = (width-1, height-1)
// *img_original is the original image
// *img_new width = *img_original width * 3 - 2
// *img_new width = *img_original height * 3 - 2
// 8 bits per color (0 - 255)
// upscale function is called independently for each color.
// this allows it to be faster for black and white images as it only needs to be called once.
// Can therefore also be applied to images which use a different color map than RGB (JPEG, for example).
/*
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <vector>
#include <stdlib.h>
#include <string.h>
#include "upscale.cuh"
#define THREADS_PER_BLOCK 64
//__global__ void upscale_CUDA(unsigned char* dst, unsigned char* src, int src_height, int src_width, int src_channels, int threshold) {
__global__ void upscale_CUDA(unsigned char * dst, unsigned char * src, int src_width, int src_height, int src_channels, unsigned char threshold) {
// not using shared memory right now
// there is 48 KB of shared memory available.
// images are typically more than that, so I'll have to think about how it could be implemented
//extern __shared__ unsigned char pic[];
//int pixel = blockIdx.x * blockdim.x + threadIdx.x;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
// not relevant to code function, but shows how a thread could access a pixel in every channel.
// pixel values are from 0 to 255.
//for (int k = 0; k < channels; k++){
// img[idx + k];
//}
int dst_width = src_width * 3 - 2;
//int dst_height = src_height * 3 - 2;
//long int dst_elements = dst_width * dst_height * src_channels;
//long int src_elements = src_width * src_height * src_channels;
int src_stride = src_width * src_channels;
int dst_stride = dst_width * src_channels;
// if invalid location do nothing.
//if (i >= dst_width || j >= dst_height) // is that width or width-1?
if (i >= src_width || j >= src_height)
return;
// all channels for a pixel are grouped together. To access an adjacent pixel, you must add by the number of channels.
for (int k = 0; k < src_channels; k++) {
int dst_index = (j * 21 + i * 3) + k; // this is strictly for my predefined dst width and height (*3 -2)
int src_index = (j * src_width + i) + k;
// transfer known src values to dst
// to access different channels, the number of elements of the src/dst image must be added to the respective array index.
dst[dst_index] = src[src_index];
// vertical comparison acts on src image and applies values to dst image
int y_diff = src[src_index + src_stride] - src[src_index];
if (y_diff < threshold) { // apply third-average
// linear fill
int step = y_diff / 3;
dst[dst_index + dst_stride] = src[src_index] + step;
dst[dst_index + 2 * dst_stride] = src[src_index] + step * 2;
}
else { // nearest neighbor
dst[dst_index + dst_stride] = src[src_index];
dst[dst_index + 2 * dst_stride] = src[src_index + src_stride];
}
__syncthreads();
// horizontal
// I know this is painfully inefficient.
int x_diff_0 = src[src_index] - src[src_index + src_channels];
int x_diff_1 = dst[dst_index + dst_stride] - dst[dst_index + dst_stride + src_channels];
int x_diff_2 = dst[dst_index + 2 * dst_stride] - dst[dst_index + 2 * dst_stride + src_channels];
int step = 0;
if (x_diff_0 < threshold) { // apply third-average
// linear fill
step = x_diff_0 / 3;
dst[dst_index + 1] = src[src_index] + step;
dst[dst_index + 2] = src[src_index] + step * 2;
}
else { // nearest neighbor
dst[dst_index + src_channels] = src[src_index];
dst[dst_index + 2*src_channels] = src[src_index + src_channels];
}
if (x_diff_1 < threshold) { // apply third-average
// linear fill
step = x_diff_1 / 3;
dst[dst_index + dst_stride + src_channels] = dst[dst_index + dst_stride] + step;
dst[dst_index + dst_stride + 2*src_channels] = dst[dst_index + dst_stride] + step * 2;
}
else { // nearest neighbor
dst[dst_index + dst_stride + src_channels] = dst[dst_index + dst_stride];
dst[dst_index + dst_stride + 2*src_channels] = dst[dst_index + dst_stride + 3];
}
if (x_diff_2 < threshold) { // apply third-average
// linear fill
step = x_diff_2 / 3;
dst[dst_index + 2 * dst_stride + src_channels] = dst[dst_index + 2 * dst_stride] + step;
dst[dst_index + 2 * dst_stride + 2*src_channels] = dst[dst_index + 2 * dst_stride] + step * 2;
}
else { // nearest neighbor
dst[dst_index + 2 * dst_stride + src_channels] = dst[dst_index + 2 * dst_stride];
dst[dst_index + 2 * dst_stride + 2*src_channels] = dst[dst_index + 2 * dst_stride + 3];
}
__syncthreads();
}
__syncthreads();
}
__global__ void upscale(unsigned char threshold) {
// CUDA timing parameters
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float ms;
// file handling using opencv
// IMREAD_COLOR loads image in BGR 8-bit format
// IMREAD_UNCHANGED includes alpha channel
// IMREAD_GRAYSCALE loads as intensity 0-1
// load image
string image_path = samples::findFile("peppers.png");
Mat src = imread(image_path, IMREAD_COLOR);
// check if image loaded properly
if (src.empty()) {
cout << "Could not read image: " << image_path << endl;
return 1;
}
// ------------------------------------------
// properties of the source and upscaled image
// ------------------------------------------
// input dimensions
int src_height = src.rows;
int src_width = src.cols;
// channels (e.g. Red, Green, Blue)
int channels = src.channels();
//int type = src.type; // CV_8UC3?
// output dimensions
int dst_height = src_height * 3 - 2;
int dst_width = src_width * 3 - 2;
// number of elements (if a picture has 3 channels, this is 3 * pixels)
int dst_elements = dst_width * dst_height * channels;
int src_elements = src_width * src_height * channels;
// number of bytes each image will take
int dst_size = dst_elements * sizeof(unsigned char);
int src_size = src_elements * sizeof(unsigned char);
// create new image with same datatype as input
Mat dst(dst_height, dst_width, CV_8UC3, Scalar(0, 0, 0));
//Mat dst(dst_height, dst_width, type);
// image data for upscale function
unsigned char* src_img = src.data;
unsigned char* dst_img = dst.data;
cout << "Loaded " << image_path << " -- " << src_height << ", " << src_width << " -- Channels: " << channels << endl;
// initialize device variables
unsigned char* dev_src, * dev_dst;
// number of blocks to call in kernel. Max threads per block is usually 1024
int blocks = (src_elements + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
// allocate memory in GPU
cudaMalloc((void**)&dev_dst, dst_size);
cudaMalloc((void**)&dev_src, src_size);
// used for shared memory if eventually implemented
//cudaMallocManaged(&dst, dst_size);
//cudaMallocManaged(&src, src_size);
// copy data from CPU to GPU
cudaMemcpy(dev_dst, dst_img, dst_size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_src, src_img, src_size, cudaMemcpyHostToDevice);
// start timer for performance evaluation
cudaEventRecord(start);
// call upscale function
//upscale_CUDA<<<blocks, THREADS_PER_BLOCK>>> (dev_dst, dev_src, src_elements, src_width, src_height, threshold); // <<<blocks, threads per block, shared mem>>>
dim3 grid(src_width, src_height);
upscale_CUDA < < <grid, 1 > > >(dev_dst, dev_src, src_width, src_height, channels, threshold);
cudaDeviceSynchronize();
// end timer
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&ms, start, stop);
// copy data back from GPU to CPU
cudaMemcpy(dst_img, dev_dst, dst_size, cudaMemcpyDeviceToHost);
cudaMemcpy(src_img, dev_src, dst_size, cudaMemcpyDeviceToHost); // might not need this
// free GPU
cudaFree(dev_dst);
cudaFree(dev_src);
// create output image. I might not need another Mat -- just use 'dst' instead of 'output'
//Mat output = Mat(dst_height, dst_width, type, dst);
imshow("source", src);
imshow("output", dst);
imwrite("upscaled_image.png", dst);
waitKey(0);
std::cout << "\ntime (ms) = " << ms << std::endl;
}
/*
void upscale(unsigned char* dst, unsigned char* src, int src_height, int src_width, int src_channels, int threshold){
unsigned char* dev_src, * dev_dst;
int dst_width = src_width * 3 - 2;
int dst_height = src_height * 3 - 2;
int dst_elements = dst_width * dst_height * src_channels;
int src_elements = src_width * src_height * src_channels;
int dst_size = dst_elements * sizeof(unsigned char);
int src_size = src_elements * sizeof(unsigned char);
int blocks = (src_elements + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
// allocate memory in GPU
cudaMalloc((void**)&dev_dst, dst_size);
cudaMalloc((void**)&dev_src, src_size);
//cudaMallocManaged(&dst, dst_size);
//cudaMallocManaged(&src, src_size);
// copy data from CPU to GPU
cudaMemcpy(dev_dst, dst, dst_size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_src, src, src_size, cudaMemcpyHostToDevice);
upscale_CUDA<<<blocks, THREADS_PER_BLOCK>>> (dev_src, src_height, src_width, src_channels, threshold); // <<<blocks, threads per block, shared mem>>>
cudaDeviceSynchronize();
// copy data back from GPU to CPU
cudaMemcpy(dst, dev_dst, dst_size, cudaMemcpyDeviceToHost);
// free GPU
cudaFree(dev_dst);
cudaFree(dev_src);
}
*/ | .file "tmpxft_0001e20d_00000000-6_upscale.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | // Author: Noah Van Der Weide
// 3/30/2020
// upscale the image by doubling height and width
// fill in empty areas according to neighboring pixels and difference thresholds
// THREE APPROACHES
// FIRST APPROACH:
// each thread will process one pixel
// SECOND APPROACH:
// each thread will process one original pixel and surrounding pixels
// THIRD APPROACH:
// each thread will process one original pixel and pixels to the right and below
// Two filling approaches:
// First:
// Tackle everything at once.
// Second:
// Stretch out original image and fill in adjacent pixels with original pixel value,
// Then go through and SAXPY if original pixel differences aren't too great.
// dimension of image: upper left = (0,0), bottom right = (width-1, height-1)
// *img_original is the original image
// *img_new width = *img_original width * 3 - 2
// *img_new width = *img_original height * 3 - 2
// 8 bits per color (0 - 255)
// upscale function is called independently for each color.
// this allows it to be faster for black and white images as it only needs to be called once.
// Can therefore also be applied to images which use a different color map than RGB (JPEG, for example).
/*
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <vector>
#include <stdlib.h>
#include <string.h>
#include "upscale.cuh"
#define THREADS_PER_BLOCK 64
//__global__ void upscale_CUDA(unsigned char* dst, unsigned char* src, int src_height, int src_width, int src_channels, int threshold) {
__global__ void upscale_CUDA(unsigned char * dst, unsigned char * src, int src_width, int src_height, int src_channels, unsigned char threshold) {
// not using shared memory right now
// there is 48 KB of shared memory available.
// images are typically more than that, so I'll have to think about how it could be implemented
//extern __shared__ unsigned char pic[];
//int pixel = blockIdx.x * blockdim.x + threadIdx.x;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
// not relevant to code function, but shows how a thread could access a pixel in every channel.
// pixel values are from 0 to 255.
//for (int k = 0; k < channels; k++){
// img[idx + k];
//}
int dst_width = src_width * 3 - 2;
//int dst_height = src_height * 3 - 2;
//long int dst_elements = dst_width * dst_height * src_channels;
//long int src_elements = src_width * src_height * src_channels;
int src_stride = src_width * src_channels;
int dst_stride = dst_width * src_channels;
// if invalid location do nothing.
//if (i >= dst_width || j >= dst_height) // is that width or width-1?
if (i >= src_width || j >= src_height)
return;
// all channels for a pixel are grouped together. To access an adjacent pixel, you must add by the number of channels.
for (int k = 0; k < src_channels; k++) {
int dst_index = (j * 21 + i * 3) + k; // this is strictly for my predefined dst width and height (*3 -2)
int src_index = (j * src_width + i) + k;
// transfer known src values to dst
// to access different channels, the number of elements of the src/dst image must be added to the respective array index.
dst[dst_index] = src[src_index];
// vertical comparison acts on src image and applies values to dst image
int y_diff = src[src_index + src_stride] - src[src_index];
if (y_diff < threshold) { // apply third-average
// linear fill
int step = y_diff / 3;
dst[dst_index + dst_stride] = src[src_index] + step;
dst[dst_index + 2 * dst_stride] = src[src_index] + step * 2;
}
else { // nearest neighbor
dst[dst_index + dst_stride] = src[src_index];
dst[dst_index + 2 * dst_stride] = src[src_index + src_stride];
}
__syncthreads();
// horizontal
// I know this is painfully inefficient.
int x_diff_0 = src[src_index] - src[src_index + src_channels];
int x_diff_1 = dst[dst_index + dst_stride] - dst[dst_index + dst_stride + src_channels];
int x_diff_2 = dst[dst_index + 2 * dst_stride] - dst[dst_index + 2 * dst_stride + src_channels];
int step = 0;
if (x_diff_0 < threshold) { // apply third-average
// linear fill
step = x_diff_0 / 3;
dst[dst_index + 1] = src[src_index] + step;
dst[dst_index + 2] = src[src_index] + step * 2;
}
else { // nearest neighbor
dst[dst_index + src_channels] = src[src_index];
dst[dst_index + 2*src_channels] = src[src_index + src_channels];
}
if (x_diff_1 < threshold) { // apply third-average
// linear fill
step = x_diff_1 / 3;
dst[dst_index + dst_stride + src_channels] = dst[dst_index + dst_stride] + step;
dst[dst_index + dst_stride + 2*src_channels] = dst[dst_index + dst_stride] + step * 2;
}
else { // nearest neighbor
dst[dst_index + dst_stride + src_channels] = dst[dst_index + dst_stride];
dst[dst_index + dst_stride + 2*src_channels] = dst[dst_index + dst_stride + 3];
}
if (x_diff_2 < threshold) { // apply third-average
// linear fill
step = x_diff_2 / 3;
dst[dst_index + 2 * dst_stride + src_channels] = dst[dst_index + 2 * dst_stride] + step;
dst[dst_index + 2 * dst_stride + 2*src_channels] = dst[dst_index + 2 * dst_stride] + step * 2;
}
else { // nearest neighbor
dst[dst_index + 2 * dst_stride + src_channels] = dst[dst_index + 2 * dst_stride];
dst[dst_index + 2 * dst_stride + 2*src_channels] = dst[dst_index + 2 * dst_stride + 3];
}
__syncthreads();
}
__syncthreads();
}
__global__ void upscale(unsigned char threshold) {
// CUDA timing parameters
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float ms;
// file handling using opencv
// IMREAD_COLOR loads image in BGR 8-bit format
// IMREAD_UNCHANGED includes alpha channel
// IMREAD_GRAYSCALE loads as intensity 0-1
// load image
string image_path = samples::findFile("peppers.png");
Mat src = imread(image_path, IMREAD_COLOR);
// check if image loaded properly
if (src.empty()) {
cout << "Could not read image: " << image_path << endl;
return 1;
}
// ------------------------------------------
// properties of the source and upscaled image
// ------------------------------------------
// input dimensions
int src_height = src.rows;
int src_width = src.cols;
// channels (e.g. Red, Green, Blue)
int channels = src.channels();
//int type = src.type; // CV_8UC3?
// output dimensions
int dst_height = src_height * 3 - 2;
int dst_width = src_width * 3 - 2;
// number of elements (if a picture has 3 channels, this is 3 * pixels)
int dst_elements = dst_width * dst_height * channels;
int src_elements = src_width * src_height * channels;
// number of bytes each image will take
int dst_size = dst_elements * sizeof(unsigned char);
int src_size = src_elements * sizeof(unsigned char);
// create new image with same datatype as input
Mat dst(dst_height, dst_width, CV_8UC3, Scalar(0, 0, 0));
//Mat dst(dst_height, dst_width, type);
// image data for upscale function
unsigned char* src_img = src.data;
unsigned char* dst_img = dst.data;
cout << "Loaded " << image_path << " -- " << src_height << ", " << src_width << " -- Channels: " << channels << endl;
// initialize device variables
unsigned char* dev_src, * dev_dst;
// number of blocks to call in kernel. Max threads per block is usually 1024
int blocks = (src_elements + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
// allocate memory in GPU
cudaMalloc((void**)&dev_dst, dst_size);
cudaMalloc((void**)&dev_src, src_size);
// used for shared memory if eventually implemented
//cudaMallocManaged(&dst, dst_size);
//cudaMallocManaged(&src, src_size);
// copy data from CPU to GPU
cudaMemcpy(dev_dst, dst_img, dst_size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_src, src_img, src_size, cudaMemcpyHostToDevice);
// start timer for performance evaluation
cudaEventRecord(start);
// call upscale function
//upscale_CUDA<<<blocks, THREADS_PER_BLOCK>>> (dev_dst, dev_src, src_elements, src_width, src_height, threshold); // <<<blocks, threads per block, shared mem>>>
dim3 grid(src_width, src_height);
upscale_CUDA < < <grid, 1 > > >(dev_dst, dev_src, src_width, src_height, channels, threshold);
cudaDeviceSynchronize();
// end timer
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&ms, start, stop);
// copy data back from GPU to CPU
cudaMemcpy(dst_img, dev_dst, dst_size, cudaMemcpyDeviceToHost);
cudaMemcpy(src_img, dev_src, dst_size, cudaMemcpyDeviceToHost); // might not need this
// free GPU
cudaFree(dev_dst);
cudaFree(dev_src);
// create output image. I might not need another Mat -- just use 'dst' instead of 'output'
//Mat output = Mat(dst_height, dst_width, type, dst);
imshow("source", src);
imshow("output", dst);
imwrite("upscaled_image.png", dst);
waitKey(0);
std::cout << "\ntime (ms) = " << ms << std::endl;
}
/*
void upscale(unsigned char* dst, unsigned char* src, int src_height, int src_width, int src_channels, int threshold){
unsigned char* dev_src, * dev_dst;
int dst_width = src_width * 3 - 2;
int dst_height = src_height * 3 - 2;
int dst_elements = dst_width * dst_height * src_channels;
int src_elements = src_width * src_height * src_channels;
int dst_size = dst_elements * sizeof(unsigned char);
int src_size = src_elements * sizeof(unsigned char);
int blocks = (src_elements + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
// allocate memory in GPU
cudaMalloc((void**)&dev_dst, dst_size);
cudaMalloc((void**)&dev_src, src_size);
//cudaMallocManaged(&dst, dst_size);
//cudaMallocManaged(&src, src_size);
// copy data from CPU to GPU
cudaMemcpy(dev_dst, dst, dst_size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_src, src, src_size, cudaMemcpyHostToDevice);
upscale_CUDA<<<blocks, THREADS_PER_BLOCK>>> (dev_src, src_height, src_width, src_channels, threshold); // <<<blocks, threads per block, shared mem>>>
cudaDeviceSynchronize();
// copy data back from GPU to CPU
cudaMemcpy(dst, dev_dst, dst_size, cudaMemcpyDeviceToHost);
// free GPU
cudaFree(dev_dst);
cudaFree(dev_src);
}
*/ | #include <hip/hip_runtime.h>
// Author: Noah Van Der Weide
// 3/30/2020
// upscale the image by doubling height and width
// fill in empty areas according to neighboring pixels and difference thresholds
// THREE APPROACHES
// FIRST APPROACH:
// each thread will process one pixel
// SECOND APPROACH:
// each thread will process one original pixel and surrounding pixels
// THIRD APPROACH:
// each thread will process one original pixel and pixels to the right and below
// Two filling approaches:
// First:
// Tackle everything at once.
// Second:
// Stretch out original image and fill in adjacent pixels with original pixel value,
// Then go through and SAXPY if original pixel differences aren't too great.
// dimension of image: upper left = (0,0), bottom right = (width-1, height-1)
// *img_original is the original image
// *img_new width = *img_original width * 3 - 2
// *img_new width = *img_original height * 3 - 2
// 8 bits per color (0 - 255)
// upscale function is called independently for each color.
// this allows it to be faster for black and white images as it only needs to be called once.
// Can therefore also be applied to images which use a different color map than RGB (JPEG, for example).
/*
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <vector>
#include <stdlib.h>
#include <string.h>
#include "upscale.cuh"
#define THREADS_PER_BLOCK 64
//__global__ void upscale_CUDA(unsigned char* dst, unsigned char* src, int src_height, int src_width, int src_channels, int threshold) {
__global__ void upscale_CUDA(unsigned char * dst, unsigned char * src, int src_width, int src_height, int src_channels, unsigned char threshold) {
// not using shared memory right now
// there is 48 KB of shared memory available.
// images are typically more than that, so I'll have to think about how it could be implemented
//extern __shared__ unsigned char pic[];
//int pixel = blockIdx.x * blockdim.x + threadIdx.x;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
// not relevant to code function, but shows how a thread could access a pixel in every channel.
// pixel values are from 0 to 255.
//for (int k = 0; k < channels; k++){
// img[idx + k];
//}
int dst_width = src_width * 3 - 2;
//int dst_height = src_height * 3 - 2;
//long int dst_elements = dst_width * dst_height * src_channels;
//long int src_elements = src_width * src_height * src_channels;
int src_stride = src_width * src_channels;
int dst_stride = dst_width * src_channels;
// if invalid location do nothing.
//if (i >= dst_width || j >= dst_height) // is that width or width-1?
if (i >= src_width || j >= src_height)
return;
// all channels for a pixel are grouped together. To access an adjacent pixel, you must add by the number of channels.
for (int k = 0; k < src_channels; k++) {
int dst_index = (j * 21 + i * 3) + k; // this is strictly for my predefined dst width and height (*3 -2)
int src_index = (j * src_width + i) + k;
// transfer known src values to dst
// to access different channels, the number of elements of the src/dst image must be added to the respective array index.
dst[dst_index] = src[src_index];
// vertical comparison acts on src image and applies values to dst image
int y_diff = src[src_index + src_stride] - src[src_index];
if (y_diff < threshold) { // apply third-average
// linear fill
int step = y_diff / 3;
dst[dst_index + dst_stride] = src[src_index] + step;
dst[dst_index + 2 * dst_stride] = src[src_index] + step * 2;
}
else { // nearest neighbor
dst[dst_index + dst_stride] = src[src_index];
dst[dst_index + 2 * dst_stride] = src[src_index + src_stride];
}
__syncthreads();
// horizontal
// I know this is painfully inefficient.
int x_diff_0 = src[src_index] - src[src_index + src_channels];
int x_diff_1 = dst[dst_index + dst_stride] - dst[dst_index + dst_stride + src_channels];
int x_diff_2 = dst[dst_index + 2 * dst_stride] - dst[dst_index + 2 * dst_stride + src_channels];
int step = 0;
if (x_diff_0 < threshold) { // apply third-average
// linear fill
step = x_diff_0 / 3;
dst[dst_index + 1] = src[src_index] + step;
dst[dst_index + 2] = src[src_index] + step * 2;
}
else { // nearest neighbor
dst[dst_index + src_channels] = src[src_index];
dst[dst_index + 2*src_channels] = src[src_index + src_channels];
}
if (x_diff_1 < threshold) { // apply third-average
// linear fill
step = x_diff_1 / 3;
dst[dst_index + dst_stride + src_channels] = dst[dst_index + dst_stride] + step;
dst[dst_index + dst_stride + 2*src_channels] = dst[dst_index + dst_stride] + step * 2;
}
else { // nearest neighbor
dst[dst_index + dst_stride + src_channels] = dst[dst_index + dst_stride];
dst[dst_index + dst_stride + 2*src_channels] = dst[dst_index + dst_stride + 3];
}
if (x_diff_2 < threshold) { // apply third-average
// linear fill
step = x_diff_2 / 3;
dst[dst_index + 2 * dst_stride + src_channels] = dst[dst_index + 2 * dst_stride] + step;
dst[dst_index + 2 * dst_stride + 2*src_channels] = dst[dst_index + 2 * dst_stride] + step * 2;
}
else { // nearest neighbor
dst[dst_index + 2 * dst_stride + src_channels] = dst[dst_index + 2 * dst_stride];
dst[dst_index + 2 * dst_stride + 2*src_channels] = dst[dst_index + 2 * dst_stride + 3];
}
__syncthreads();
}
__syncthreads();
}
__global__ void upscale(unsigned char threshold) {
// CUDA timing parameters
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float ms;
// file handling using opencv
// IMREAD_COLOR loads image in BGR 8-bit format
// IMREAD_UNCHANGED includes alpha channel
// IMREAD_GRAYSCALE loads as intensity 0-1
// load image
string image_path = samples::findFile("peppers.png");
Mat src = imread(image_path, IMREAD_COLOR);
// check if image loaded properly
if (src.empty()) {
cout << "Could not read image: " << image_path << endl;
return 1;
}
// ------------------------------------------
// properties of the source and upscaled image
// ------------------------------------------
// input dimensions
int src_height = src.rows;
int src_width = src.cols;
// channels (e.g. Red, Green, Blue)
int channels = src.channels();
//int type = src.type; // CV_8UC3?
// output dimensions
int dst_height = src_height * 3 - 2;
int dst_width = src_width * 3 - 2;
// number of elements (if a picture has 3 channels, this is 3 * pixels)
int dst_elements = dst_width * dst_height * channels;
int src_elements = src_width * src_height * channels;
// number of bytes each image will take
int dst_size = dst_elements * sizeof(unsigned char);
int src_size = src_elements * sizeof(unsigned char);
// create new image with same datatype as input
Mat dst(dst_height, dst_width, CV_8UC3, Scalar(0, 0, 0));
//Mat dst(dst_height, dst_width, type);
// image data for upscale function
unsigned char* src_img = src.data;
unsigned char* dst_img = dst.data;
cout << "Loaded " << image_path << " -- " << src_height << ", " << src_width << " -- Channels: " << channels << endl;
// initialize device variables
unsigned char* dev_src, * dev_dst;
// number of blocks to call in kernel. Max threads per block is usually 1024
int blocks = (src_elements + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
// allocate memory in GPU
cudaMalloc((void**)&dev_dst, dst_size);
cudaMalloc((void**)&dev_src, src_size);
// used for shared memory if eventually implemented
//cudaMallocManaged(&dst, dst_size);
//cudaMallocManaged(&src, src_size);
// copy data from CPU to GPU
cudaMemcpy(dev_dst, dst_img, dst_size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_src, src_img, src_size, cudaMemcpyHostToDevice);
// start timer for performance evaluation
cudaEventRecord(start);
// call upscale function
//upscale_CUDA<<<blocks, THREADS_PER_BLOCK>>> (dev_dst, dev_src, src_elements, src_width, src_height, threshold); // <<<blocks, threads per block, shared mem>>>
dim3 grid(src_width, src_height);
upscale_CUDA < < <grid, 1 > > >(dev_dst, dev_src, src_width, src_height, channels, threshold);
cudaDeviceSynchronize();
// end timer
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&ms, start, stop);
// copy data back from GPU to CPU
cudaMemcpy(dst_img, dev_dst, dst_size, cudaMemcpyDeviceToHost);
cudaMemcpy(src_img, dev_src, dst_size, cudaMemcpyDeviceToHost); // might not need this
// free GPU
cudaFree(dev_dst);
cudaFree(dev_src);
// create output image. I might not need another Mat -- just use 'dst' instead of 'output'
//Mat output = Mat(dst_height, dst_width, type, dst);
imshow("source", src);
imshow("output", dst);
imwrite("upscaled_image.png", dst);
waitKey(0);
std::cout << "\ntime (ms) = " << ms << std::endl;
}
/*
void upscale(unsigned char* dst, unsigned char* src, int src_height, int src_width, int src_channels, int threshold){
unsigned char* dev_src, * dev_dst;
int dst_width = src_width * 3 - 2;
int dst_height = src_height * 3 - 2;
int dst_elements = dst_width * dst_height * src_channels;
int src_elements = src_width * src_height * src_channels;
int dst_size = dst_elements * sizeof(unsigned char);
int src_size = src_elements * sizeof(unsigned char);
int blocks = (src_elements + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
// allocate memory in GPU
cudaMalloc((void**)&dev_dst, dst_size);
cudaMalloc((void**)&dev_src, src_size);
//cudaMallocManaged(&dst, dst_size);
//cudaMallocManaged(&src, src_size);
// copy data from CPU to GPU
cudaMemcpy(dev_dst, dst, dst_size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_src, src, src_size, cudaMemcpyHostToDevice);
upscale_CUDA<<<blocks, THREADS_PER_BLOCK>>> (dev_src, src_height, src_width, src_channels, threshold); // <<<blocks, threads per block, shared mem>>>
cudaDeviceSynchronize();
// copy data back from GPU to CPU
cudaMemcpy(dst, dev_dst, dst_size, cudaMemcpyDeviceToHost);
// free GPU
cudaFree(dev_dst);
cudaFree(dev_src);
}
*/ |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
// Author: Noah Van Der Weide
// 3/30/2020
// upscale the image by doubling height and width
// fill in empty areas according to neighboring pixels and difference thresholds
// THREE APPROACHES
// FIRST APPROACH:
// each thread will process one pixel
// SECOND APPROACH:
// each thread will process one original pixel and surrounding pixels
// THIRD APPROACH:
// each thread will process one original pixel and pixels to the right and below
// Two filling approaches:
// First:
// Tackle everything at once.
// Second:
// Stretch out original image and fill in adjacent pixels with original pixel value,
// Then go through and SAXPY if original pixel differences aren't too great.
// dimension of image: upper left = (0,0), bottom right = (width-1, height-1)
// *img_original is the original image
// *img_new width = *img_original width * 3 - 2
// *img_new width = *img_original height * 3 - 2
// 8 bits per color (0 - 255)
// upscale function is called independently for each color.
// this allows it to be faster for black and white images as it only needs to be called once.
// Can therefore also be applied to images which use a different color map than RGB (JPEG, for example).
/*
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <vector>
#include <stdlib.h>
#include <string.h>
#include "upscale.cuh"
#define THREADS_PER_BLOCK 64
//__global__ void upscale_CUDA(unsigned char* dst, unsigned char* src, int src_height, int src_width, int src_channels, int threshold) {
__global__ void upscale_CUDA(unsigned char * dst, unsigned char * src, int src_width, int src_height, int src_channels, unsigned char threshold) {
// not using shared memory right now
// there is 48 KB of shared memory available.
// images are typically more than that, so I'll have to think about how it could be implemented
//extern __shared__ unsigned char pic[];
//int pixel = blockIdx.x * blockdim.x + threadIdx.x;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
// not relevant to code function, but shows how a thread could access a pixel in every channel.
// pixel values are from 0 to 255.
//for (int k = 0; k < channels; k++){
// img[idx + k];
//}
int dst_width = src_width * 3 - 2;
//int dst_height = src_height * 3 - 2;
//long int dst_elements = dst_width * dst_height * src_channels;
//long int src_elements = src_width * src_height * src_channels;
int src_stride = src_width * src_channels;
int dst_stride = dst_width * src_channels;
// if invalid location do nothing.
//if (i >= dst_width || j >= dst_height) // is that width or width-1?
if (i >= src_width || j >= src_height)
return;
// all channels for a pixel are grouped together. To access an adjacent pixel, you must add by the number of channels.
for (int k = 0; k < src_channels; k++) {
int dst_index = (j * 21 + i * 3) + k; // this is strictly for my predefined dst width and height (*3 -2)
int src_index = (j * src_width + i) + k;
// transfer known src values to dst
// to access different channels, the number of elements of the src/dst image must be added to the respective array index.
dst[dst_index] = src[src_index];
// vertical comparison acts on src image and applies values to dst image
int y_diff = src[src_index + src_stride] - src[src_index];
if (y_diff < threshold) { // apply third-average
// linear fill
int step = y_diff / 3;
dst[dst_index + dst_stride] = src[src_index] + step;
dst[dst_index + 2 * dst_stride] = src[src_index] + step * 2;
}
else { // nearest neighbor
dst[dst_index + dst_stride] = src[src_index];
dst[dst_index + 2 * dst_stride] = src[src_index + src_stride];
}
__syncthreads();
// horizontal
// I know this is painfully inefficient.
int x_diff_0 = src[src_index] - src[src_index + src_channels];
int x_diff_1 = dst[dst_index + dst_stride] - dst[dst_index + dst_stride + src_channels];
int x_diff_2 = dst[dst_index + 2 * dst_stride] - dst[dst_index + 2 * dst_stride + src_channels];
int step = 0;
if (x_diff_0 < threshold) { // apply third-average
// linear fill
step = x_diff_0 / 3;
dst[dst_index + 1] = src[src_index] + step;
dst[dst_index + 2] = src[src_index] + step * 2;
}
else { // nearest neighbor
dst[dst_index + src_channels] = src[src_index];
dst[dst_index + 2*src_channels] = src[src_index + src_channels];
}
if (x_diff_1 < threshold) { // apply third-average
// linear fill
step = x_diff_1 / 3;
dst[dst_index + dst_stride + src_channels] = dst[dst_index + dst_stride] + step;
dst[dst_index + dst_stride + 2*src_channels] = dst[dst_index + dst_stride] + step * 2;
}
else { // nearest neighbor
dst[dst_index + dst_stride + src_channels] = dst[dst_index + dst_stride];
dst[dst_index + dst_stride + 2*src_channels] = dst[dst_index + dst_stride + 3];
}
if (x_diff_2 < threshold) { // apply third-average
// linear fill
step = x_diff_2 / 3;
dst[dst_index + 2 * dst_stride + src_channels] = dst[dst_index + 2 * dst_stride] + step;
dst[dst_index + 2 * dst_stride + 2*src_channels] = dst[dst_index + 2 * dst_stride] + step * 2;
}
else { // nearest neighbor
dst[dst_index + 2 * dst_stride + src_channels] = dst[dst_index + 2 * dst_stride];
dst[dst_index + 2 * dst_stride + 2*src_channels] = dst[dst_index + 2 * dst_stride + 3];
}
__syncthreads();
}
__syncthreads();
}
__global__ void upscale(unsigned char threshold) {
// CUDA timing parameters
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float ms;
// file handling using opencv
// IMREAD_COLOR loads image in BGR 8-bit format
// IMREAD_UNCHANGED includes alpha channel
// IMREAD_GRAYSCALE loads as intensity 0-1
// load image
string image_path = samples::findFile("peppers.png");
Mat src = imread(image_path, IMREAD_COLOR);
// check if image loaded properly
if (src.empty()) {
cout << "Could not read image: " << image_path << endl;
return 1;
}
// ------------------------------------------
// properties of the source and upscaled image
// ------------------------------------------
// input dimensions
int src_height = src.rows;
int src_width = src.cols;
// channels (e.g. Red, Green, Blue)
int channels = src.channels();
//int type = src.type; // CV_8UC3?
// output dimensions
int dst_height = src_height * 3 - 2;
int dst_width = src_width * 3 - 2;
// number of elements (if a picture has 3 channels, this is 3 * pixels)
int dst_elements = dst_width * dst_height * channels;
int src_elements = src_width * src_height * channels;
// number of bytes each image will take
int dst_size = dst_elements * sizeof(unsigned char);
int src_size = src_elements * sizeof(unsigned char);
// create new image with same datatype as input
Mat dst(dst_height, dst_width, CV_8UC3, Scalar(0, 0, 0));
//Mat dst(dst_height, dst_width, type);
// image data for upscale function
unsigned char* src_img = src.data;
unsigned char* dst_img = dst.data;
cout << "Loaded " << image_path << " -- " << src_height << ", " << src_width << " -- Channels: " << channels << endl;
// initialize device variables
unsigned char* dev_src, * dev_dst;
// number of blocks to call in kernel. Max threads per block is usually 1024
int blocks = (src_elements + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
// allocate memory in GPU
cudaMalloc((void**)&dev_dst, dst_size);
cudaMalloc((void**)&dev_src, src_size);
// used for shared memory if eventually implemented
//cudaMallocManaged(&dst, dst_size);
//cudaMallocManaged(&src, src_size);
// copy data from CPU to GPU
cudaMemcpy(dev_dst, dst_img, dst_size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_src, src_img, src_size, cudaMemcpyHostToDevice);
// start timer for performance evaluation
cudaEventRecord(start);
// call upscale function
//upscale_CUDA<<<blocks, THREADS_PER_BLOCK>>> (dev_dst, dev_src, src_elements, src_width, src_height, threshold); // <<<blocks, threads per block, shared mem>>>
dim3 grid(src_width, src_height);
upscale_CUDA < < <grid, 1 > > >(dev_dst, dev_src, src_width, src_height, channels, threshold);
cudaDeviceSynchronize();
// end timer
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&ms, start, stop);
// copy data back from GPU to CPU
cudaMemcpy(dst_img, dev_dst, dst_size, cudaMemcpyDeviceToHost);
cudaMemcpy(src_img, dev_src, dst_size, cudaMemcpyDeviceToHost); // might not need this
// free GPU
cudaFree(dev_dst);
cudaFree(dev_src);
// create output image. I might not need another Mat -- just use 'dst' instead of 'output'
//Mat output = Mat(dst_height, dst_width, type, dst);
imshow("source", src);
imshow("output", dst);
imwrite("upscaled_image.png", dst);
waitKey(0);
std::cout << "\ntime (ms) = " << ms << std::endl;
}
/*
void upscale(unsigned char* dst, unsigned char* src, int src_height, int src_width, int src_channels, int threshold){
unsigned char* dev_src, * dev_dst;
int dst_width = src_width * 3 - 2;
int dst_height = src_height * 3 - 2;
int dst_elements = dst_width * dst_height * src_channels;
int src_elements = src_width * src_height * src_channels;
int dst_size = dst_elements * sizeof(unsigned char);
int src_size = src_elements * sizeof(unsigned char);
int blocks = (src_elements + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
// allocate memory in GPU
cudaMalloc((void**)&dev_dst, dst_size);
cudaMalloc((void**)&dev_src, src_size);
//cudaMallocManaged(&dst, dst_size);
//cudaMallocManaged(&src, src_size);
// copy data from CPU to GPU
cudaMemcpy(dev_dst, dst, dst_size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_src, src, src_size, cudaMemcpyHostToDevice);
upscale_CUDA<<<blocks, THREADS_PER_BLOCK>>> (dev_src, src_height, src_width, src_channels, threshold); // <<<blocks, threads per block, shared mem>>>
cudaDeviceSynchronize();
// copy data back from GPU to CPU
cudaMemcpy(dst, dev_dst, dst_size, cudaMemcpyDeviceToHost);
// free GPU
cudaFree(dev_dst);
cudaFree(dev_src);
}
*/ | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
// Author: Noah Van Der Weide
// 3/30/2020
// upscale the image by doubling height and width
// fill in empty areas according to neighboring pixels and difference thresholds
// THREE APPROACHES
// FIRST APPROACH:
// each thread will process one pixel
// SECOND APPROACH:
// each thread will process one original pixel and surrounding pixels
// THIRD APPROACH:
// each thread will process one original pixel and pixels to the right and below
// Two filling approaches:
// First:
// Tackle everything at once.
// Second:
// Stretch out original image and fill in adjacent pixels with original pixel value,
// Then go through and SAXPY if original pixel differences aren't too great.
// dimension of image: upper left = (0,0), bottom right = (width-1, height-1)
// *img_original is the original image
// *img_new width = *img_original width * 3 - 2
// *img_new width = *img_original height * 3 - 2
// 8 bits per color (0 - 255)
// upscale function is called independently for each color.
// this allows it to be faster for black and white images as it only needs to be called once.
// Can therefore also be applied to images which use a different color map than RGB (JPEG, for example).
/*
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <vector>
#include <stdlib.h>
#include <string.h>
#include "upscale.cuh"
#define THREADS_PER_BLOCK 64
//__global__ void upscale_CUDA(unsigned char* dst, unsigned char* src, int src_height, int src_width, int src_channels, int threshold) {
__global__ void upscale_CUDA(unsigned char * dst, unsigned char * src, int src_width, int src_height, int src_channels, unsigned char threshold) {
// not using shared memory right now
// there is 48 KB of shared memory available.
// images are typically more than that, so I'll have to think about how it could be implemented
//extern __shared__ unsigned char pic[];
//int pixel = blockIdx.x * blockdim.x + threadIdx.x;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
// not relevant to code function, but shows how a thread could access a pixel in every channel.
// pixel values are from 0 to 255.
//for (int k = 0; k < channels; k++){
// img[idx + k];
//}
int dst_width = src_width * 3 - 2;
//int dst_height = src_height * 3 - 2;
//long int dst_elements = dst_width * dst_height * src_channels;
//long int src_elements = src_width * src_height * src_channels;
int src_stride = src_width * src_channels;
int dst_stride = dst_width * src_channels;
// if invalid location do nothing.
//if (i >= dst_width || j >= dst_height) // is that width or width-1?
if (i >= src_width || j >= src_height)
return;
// all channels for a pixel are grouped together. To access an adjacent pixel, you must add by the number of channels.
for (int k = 0; k < src_channels; k++) {
int dst_index = (j * 21 + i * 3) + k; // this is strictly for my predefined dst width and height (*3 -2)
int src_index = (j * src_width + i) + k;
// transfer known src values to dst
// to access different channels, the number of elements of the src/dst image must be added to the respective array index.
dst[dst_index] = src[src_index];
// vertical comparison acts on src image and applies values to dst image
int y_diff = src[src_index + src_stride] - src[src_index];
if (y_diff < threshold) { // apply third-average
// linear fill
int step = y_diff / 3;
dst[dst_index + dst_stride] = src[src_index] + step;
dst[dst_index + 2 * dst_stride] = src[src_index] + step * 2;
}
else { // nearest neighbor
dst[dst_index + dst_stride] = src[src_index];
dst[dst_index + 2 * dst_stride] = src[src_index + src_stride];
}
__syncthreads();
// horizontal
// I know this is painfully inefficient.
int x_diff_0 = src[src_index] - src[src_index + src_channels];
int x_diff_1 = dst[dst_index + dst_stride] - dst[dst_index + dst_stride + src_channels];
int x_diff_2 = dst[dst_index + 2 * dst_stride] - dst[dst_index + 2 * dst_stride + src_channels];
int step = 0;
if (x_diff_0 < threshold) { // apply third-average
// linear fill
step = x_diff_0 / 3;
dst[dst_index + 1] = src[src_index] + step;
dst[dst_index + 2] = src[src_index] + step * 2;
}
else { // nearest neighbor
dst[dst_index + src_channels] = src[src_index];
dst[dst_index + 2*src_channels] = src[src_index + src_channels];
}
if (x_diff_1 < threshold) { // apply third-average
// linear fill
step = x_diff_1 / 3;
dst[dst_index + dst_stride + src_channels] = dst[dst_index + dst_stride] + step;
dst[dst_index + dst_stride + 2*src_channels] = dst[dst_index + dst_stride] + step * 2;
}
else { // nearest neighbor
dst[dst_index + dst_stride + src_channels] = dst[dst_index + dst_stride];
dst[dst_index + dst_stride + 2*src_channels] = dst[dst_index + dst_stride + 3];
}
if (x_diff_2 < threshold) { // apply third-average
// linear fill
step = x_diff_2 / 3;
dst[dst_index + 2 * dst_stride + src_channels] = dst[dst_index + 2 * dst_stride] + step;
dst[dst_index + 2 * dst_stride + 2*src_channels] = dst[dst_index + 2 * dst_stride] + step * 2;
}
else { // nearest neighbor
dst[dst_index + 2 * dst_stride + src_channels] = dst[dst_index + 2 * dst_stride];
dst[dst_index + 2 * dst_stride + 2*src_channels] = dst[dst_index + 2 * dst_stride + 3];
}
__syncthreads();
}
__syncthreads();
}
__global__ void upscale(unsigned char threshold) {
// CUDA timing parameters
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float ms;
// file handling using opencv
// IMREAD_COLOR loads image in BGR 8-bit format
// IMREAD_UNCHANGED includes alpha channel
// IMREAD_GRAYSCALE loads as intensity 0-1
// load image
string image_path = samples::findFile("peppers.png");
Mat src = imread(image_path, IMREAD_COLOR);
// check if image loaded properly
if (src.empty()) {
cout << "Could not read image: " << image_path << endl;
return 1;
}
// ------------------------------------------
// properties of the source and upscaled image
// ------------------------------------------
// input dimensions
int src_height = src.rows;
int src_width = src.cols;
// channels (e.g. Red, Green, Blue)
int channels = src.channels();
//int type = src.type; // CV_8UC3?
// output dimensions
int dst_height = src_height * 3 - 2;
int dst_width = src_width * 3 - 2;
// number of elements (if a picture has 3 channels, this is 3 * pixels)
int dst_elements = dst_width * dst_height * channels;
int src_elements = src_width * src_height * channels;
// number of bytes each image will take
int dst_size = dst_elements * sizeof(unsigned char);
int src_size = src_elements * sizeof(unsigned char);
// create new image with same datatype as input
Mat dst(dst_height, dst_width, CV_8UC3, Scalar(0, 0, 0));
//Mat dst(dst_height, dst_width, type);
// image data for upscale function
unsigned char* src_img = src.data;
unsigned char* dst_img = dst.data;
cout << "Loaded " << image_path << " -- " << src_height << ", " << src_width << " -- Channels: " << channels << endl;
// initialize device variables
unsigned char* dev_src, * dev_dst;
// number of blocks to call in kernel. Max threads per block is usually 1024
int blocks = (src_elements + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
// allocate memory in GPU
cudaMalloc((void**)&dev_dst, dst_size);
cudaMalloc((void**)&dev_src, src_size);
// used for shared memory if eventually implemented
//cudaMallocManaged(&dst, dst_size);
//cudaMallocManaged(&src, src_size);
// copy data from CPU to GPU
cudaMemcpy(dev_dst, dst_img, dst_size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_src, src_img, src_size, cudaMemcpyHostToDevice);
// start timer for performance evaluation
cudaEventRecord(start);
// call upscale function
//upscale_CUDA<<<blocks, THREADS_PER_BLOCK>>> (dev_dst, dev_src, src_elements, src_width, src_height, threshold); // <<<blocks, threads per block, shared mem>>>
dim3 grid(src_width, src_height);
upscale_CUDA < < <grid, 1 > > >(dev_dst, dev_src, src_width, src_height, channels, threshold);
cudaDeviceSynchronize();
// end timer
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&ms, start, stop);
// copy data back from GPU to CPU
cudaMemcpy(dst_img, dev_dst, dst_size, cudaMemcpyDeviceToHost);
cudaMemcpy(src_img, dev_src, dst_size, cudaMemcpyDeviceToHost); // might not need this
// free GPU
cudaFree(dev_dst);
cudaFree(dev_src);
// create output image. I might not need another Mat -- just use 'dst' instead of 'output'
//Mat output = Mat(dst_height, dst_width, type, dst);
imshow("source", src);
imshow("output", dst);
imwrite("upscaled_image.png", dst);
waitKey(0);
std::cout << "\ntime (ms) = " << ms << std::endl;
}
/*
void upscale(unsigned char* dst, unsigned char* src, int src_height, int src_width, int src_channels, int threshold){
unsigned char* dev_src, * dev_dst;
int dst_width = src_width * 3 - 2;
int dst_height = src_height * 3 - 2;
int dst_elements = dst_width * dst_height * src_channels;
int src_elements = src_width * src_height * src_channels;
int dst_size = dst_elements * sizeof(unsigned char);
int src_size = src_elements * sizeof(unsigned char);
int blocks = (src_elements + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
// allocate memory in GPU
cudaMalloc((void**)&dev_dst, dst_size);
cudaMalloc((void**)&dev_src, src_size);
//cudaMallocManaged(&dst, dst_size);
//cudaMallocManaged(&src, src_size);
// copy data from CPU to GPU
cudaMemcpy(dev_dst, dst, dst_size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_src, src, src_size, cudaMemcpyHostToDevice);
upscale_CUDA<<<blocks, THREADS_PER_BLOCK>>> (dev_src, src_height, src_width, src_channels, threshold); // <<<blocks, threads per block, shared mem>>>
cudaDeviceSynchronize();
// copy data back from GPU to CPU
cudaMemcpy(dst, dev_dst, dst_size, cudaMemcpyDeviceToHost);
// free GPU
cudaFree(dev_dst);
cudaFree(dev_src);
}
*/ | .text
.file "upscale.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0001e20d_00000000-6_upscale.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "upscale.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
#define N 1024
#define N_THR 512
void fill_ints(int* a, int size){
for(int i =0; i<size; i++)
a[i]=i;
}
__global__ void dotVecs(int *x, int *y, int *r){
__shared__ int s_tmp[N_THR];
int index = threadIdx.x + blockIdx.x * blockDim.x;
int temp = x[index] * y[index];
s_tmp[threadIdx.x] = temp; // store the multiplication to the shared memory
__syncthreads();
// Thread 0 performs the reduction
if(threadIdx.x == 0){
int sum = 0;
for(int i = 0 ; i < N_THR ; i++) sum += s_tmp[i];
*r += sum;
}
}
int main(void){
int *a, *b, *c; // host pointers
int *d_a, *d_b, *d_c; // device pointers
int size = N * sizeof(int);
// Alloc space for device copies of a, b, c
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, sizeof(int));
a = (int *)malloc(size);
fill_ints(a, N); // Alloc space host, random initialization
b = (int *)malloc(size);
fill_ints(b, N);
c = (int *)malloc(sizeof(int));
// Copy data from host to device memory
// cudaMemcpyHostToDevice is a flag determining copying from host to dev.
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
cudaMemset(d_c,0,sizeof(int));
// Launch kernel to add two vector with N threads and 1 block
// Kernel calls are asynchronous
dotVecs<<<2,N_THR>>>(d_a, d_b, d_c);
// Copy results from device to host
// cudaMemcpy blocks CPU until Kernels finish execution
cudaMemcpy(c, d_c, sizeof(int), cudaMemcpyDeviceToHost);
printf("%d\n",*c);
// needs cudaFree to deallocate device pointers
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
free(a); free(b); free(c);
return 0;
} | .file "tmpxft_001b0530_00000000-6_ex3_3.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z9fill_intsPii
.type _Z9fill_intsPii, @function
_Z9fill_intsPii:
.LFB2057:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L3
movslq %esi, %rsi
movl $0, %eax
.L5:
movl %eax, (%rdi,%rax,4)
addq $1, %rax
cmpq %rsi, %rax
jne .L5
.L3:
ret
.cfi_endproc
.LFE2057:
.size _Z9fill_intsPii, .-_Z9fill_intsPii
.globl _Z30__device_stub__Z7dotVecsPiS_S_PiS_S_
.type _Z30__device_stub__Z7dotVecsPiS_S_PiS_S_, @function
_Z30__device_stub__Z7dotVecsPiS_S_PiS_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z7dotVecsPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z30__device_stub__Z7dotVecsPiS_S_PiS_S_, .-_Z30__device_stub__Z7dotVecsPiS_S_PiS_S_
.globl _Z7dotVecsPiS_S_
.type _Z7dotVecsPiS_S_, @function
_Z7dotVecsPiS_S_:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z7dotVecsPiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z7dotVecsPiS_S_, .-_Z7dotVecsPiS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%d\n"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $64, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rdi
movl $4096, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $4096, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movl $4096, %edi
call malloc@PLT
movq %rax, %r12
movl $1024, %esi
movq %rax, %rdi
call _Z9fill_intsPii
movl $4096, %edi
call malloc@PLT
movq %rax, %rbp
movl $1024, %esi
movq %rax, %rdi
call _Z9fill_intsPii
movl $4, %edi
call malloc@PLT
movq %rax, %rbx
movl $1, %ecx
movl $4096, %edx
movq %r12, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $4096, %edx
movq %rbp, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $4, %edx
movl $0, %esi
movq 24(%rsp), %rdi
call cudaMemset@PLT
movl $512, 44(%rsp)
movl $1, 48(%rsp)
movl $2, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L19
.L16:
movl $2, %ecx
movl $4, %edx
movq 24(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
movl (%rbx), %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq %r12, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z30__device_stub__Z7dotVecsPiS_S_PiS_S_
jmp .L16
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z7dotVecsPiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z7dotVecsPiS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#define N 1024
#define N_THR 512
void fill_ints(int* a, int size){
for(int i =0; i<size; i++)
a[i]=i;
}
__global__ void dotVecs(int *x, int *y, int *r){
__shared__ int s_tmp[N_THR];
int index = threadIdx.x + blockIdx.x * blockDim.x;
int temp = x[index] * y[index];
s_tmp[threadIdx.x] = temp; // store the multiplication to the shared memory
__syncthreads();
// Thread 0 performs the reduction
if(threadIdx.x == 0){
int sum = 0;
for(int i = 0 ; i < N_THR ; i++) sum += s_tmp[i];
*r += sum;
}
}
int main(void){
int *a, *b, *c; // host pointers
int *d_a, *d_b, *d_c; // device pointers
int size = N * sizeof(int);
// Alloc space for device copies of a, b, c
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, sizeof(int));
a = (int *)malloc(size);
fill_ints(a, N); // Alloc space host, random initialization
b = (int *)malloc(size);
fill_ints(b, N);
c = (int *)malloc(sizeof(int));
// Copy data from host to device memory
// cudaMemcpyHostToDevice is a flag determining copying from host to dev.
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
cudaMemset(d_c,0,sizeof(int));
// Launch kernel to add two vector with N threads and 1 block
// Kernel calls are asynchronous
dotVecs<<<2,N_THR>>>(d_a, d_b, d_c);
// Copy results from device to host
// cudaMemcpy blocks CPU until Kernels finish execution
cudaMemcpy(c, d_c, sizeof(int), cudaMemcpyDeviceToHost);
printf("%d\n",*c);
// needs cudaFree to deallocate device pointers
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
free(a); free(b); free(c);
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define N 1024
#define N_THR 512
void fill_ints(int* a, int size){
for(int i =0; i<size; i++)
a[i]=i;
}
__global__ void dotVecs(int *x, int *y, int *r){
__shared__ int s_tmp[N_THR];
int index = threadIdx.x + blockIdx.x * blockDim.x;
int temp = x[index] * y[index];
s_tmp[threadIdx.x] = temp; // store the multiplication to the shared memory
__syncthreads();
// Thread 0 performs the reduction
if(threadIdx.x == 0){
int sum = 0;
for(int i = 0 ; i < N_THR ; i++) sum += s_tmp[i];
*r += sum;
}
}
int main(void){
int *a, *b, *c; // host pointers
int *d_a, *d_b, *d_c; // device pointers
int size = N * sizeof(int);
// Alloc space for device copies of a, b, c
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, sizeof(int));
a = (int *)malloc(size);
fill_ints(a, N); // Alloc space host, random initialization
b = (int *)malloc(size);
fill_ints(b, N);
c = (int *)malloc(sizeof(int));
// Copy data from host to device memory
// cudaMemcpyHostToDevice is a flag determining copying from host to dev.
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
hipMemset(d_c,0,sizeof(int));
// Launch kernel to add two vector with N threads and 1 block
// Kernel calls are asynchronous
dotVecs<<<2,N_THR>>>(d_a, d_b, d_c);
// Copy results from device to host
// cudaMemcpy blocks CPU until Kernels finish execution
hipMemcpy(c, d_c, sizeof(int), hipMemcpyDeviceToHost);
printf("%d\n",*c);
// needs cudaFree to deallocate device pointers
hipFree(d_a); hipFree(d_b); hipFree(d_c);
free(a); free(b); free(c);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define N 1024
#define N_THR 512
void fill_ints(int* a, int size){
for(int i =0; i<size; i++)
a[i]=i;
}
__global__ void dotVecs(int *x, int *y, int *r){
__shared__ int s_tmp[N_THR];
int index = threadIdx.x + blockIdx.x * blockDim.x;
int temp = x[index] * y[index];
s_tmp[threadIdx.x] = temp; // store the multiplication to the shared memory
__syncthreads();
// Thread 0 performs the reduction
if(threadIdx.x == 0){
int sum = 0;
for(int i = 0 ; i < N_THR ; i++) sum += s_tmp[i];
*r += sum;
}
}
int main(void){
int *a, *b, *c; // host pointers
int *d_a, *d_b, *d_c; // device pointers
int size = N * sizeof(int);
// Alloc space for device copies of a, b, c
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, sizeof(int));
a = (int *)malloc(size);
fill_ints(a, N); // Alloc space host, random initialization
b = (int *)malloc(size);
fill_ints(b, N);
c = (int *)malloc(sizeof(int));
// Copy data from host to device memory
// cudaMemcpyHostToDevice is a flag determining copying from host to dev.
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
hipMemset(d_c,0,sizeof(int));
// Launch kernel to add two vector with N threads and 1 block
// Kernel calls are asynchronous
dotVecs<<<2,N_THR>>>(d_a, d_b, d_c);
// Copy results from device to host
// cudaMemcpy blocks CPU until Kernels finish execution
hipMemcpy(c, d_c, sizeof(int), hipMemcpyDeviceToHost);
printf("%d\n",*c);
// needs cudaFree to deallocate device pointers
hipFree(d_a); hipFree(d_b); hipFree(d_c);
free(a); free(b); free(c);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z7dotVecsPiS_S_
.globl _Z7dotVecsPiS_S_
.p2align 8
.type _Z7dotVecsPiS_S_,@function
_Z7dotVecsPiS_S_:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b128 s[4:7], s[0:1], 0x0
s_mov_b32 s3, exec_lo
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, 0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
v_add_co_u32 v3, vcc_lo, s4, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v2, vcc_lo
v_add_co_u32 v1, vcc_lo, s6, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s7, v2, vcc_lo
global_load_b32 v3, v[3:4], off
global_load_b32 v1, v[1:2], off
v_lshlrev_b32_e32 v2, 2, v0
s_waitcnt vmcnt(0)
v_mul_lo_u32 v1, v1, v3
ds_store_b32 v2, v1
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB0_4
v_mov_b32_e32 v0, 0
.LBB0_2:
v_mov_b32_e32 v1, s2
s_add_i32 s2, s2, 4
s_delay_alu instid0(SALU_CYCLE_1)
s_cmpk_eq_i32 s2, 0x800
ds_load_b32 v1, v1
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v0, v1, v0
s_cbranch_scc0 .LBB0_2
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_load_b32 s2, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
v_dual_mov_b32 v1, 0 :: v_dual_add_nc_u32 v0, s2, v0
global_store_b32 v1, v0, s[0:1]
.LBB0_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z7dotVecsPiS_S_
.amdhsa_group_segment_fixed_size 2048
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z7dotVecsPiS_S_, .Lfunc_end0-_Z7dotVecsPiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 2048
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z7dotVecsPiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z7dotVecsPiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define N 1024
#define N_THR 512
void fill_ints(int* a, int size){
for(int i =0; i<size; i++)
a[i]=i;
}
__global__ void dotVecs(int *x, int *y, int *r){
__shared__ int s_tmp[N_THR];
int index = threadIdx.x + blockIdx.x * blockDim.x;
int temp = x[index] * y[index];
s_tmp[threadIdx.x] = temp; // store the multiplication to the shared memory
__syncthreads();
// Thread 0 performs the reduction
if(threadIdx.x == 0){
int sum = 0;
for(int i = 0 ; i < N_THR ; i++) sum += s_tmp[i];
*r += sum;
}
}
int main(void){
int *a, *b, *c; // host pointers
int *d_a, *d_b, *d_c; // device pointers
int size = N * sizeof(int);
// Alloc space for device copies of a, b, c
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, sizeof(int));
a = (int *)malloc(size);
fill_ints(a, N); // Alloc space host, random initialization
b = (int *)malloc(size);
fill_ints(b, N);
c = (int *)malloc(sizeof(int));
// Copy data from host to device memory
// cudaMemcpyHostToDevice is a flag determining copying from host to dev.
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
hipMemset(d_c,0,sizeof(int));
// Launch kernel to add two vector with N threads and 1 block
// Kernel calls are asynchronous
dotVecs<<<2,N_THR>>>(d_a, d_b, d_c);
// Copy results from device to host
// cudaMemcpy blocks CPU until Kernels finish execution
hipMemcpy(c, d_c, sizeof(int), hipMemcpyDeviceToHost);
printf("%d\n",*c);
// needs cudaFree to deallocate device pointers
hipFree(d_a); hipFree(d_b); hipFree(d_c);
free(a); free(b); free(c);
return 0;
} | .text
.file "ex3_3.hip"
.globl _Z9fill_intsPii # -- Begin function _Z9fill_intsPii
.p2align 4, 0x90
.type _Z9fill_intsPii,@function
_Z9fill_intsPii: # @_Z9fill_intsPii
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB0_3
# %bb.1: # %.lr.ph.preheader
movl %esi, %eax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB0_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl %ecx, (%rdi,%rcx,4)
incq %rcx
cmpq %rcx, %rax
jne .LBB0_2
.LBB0_3: # %._crit_edge
retq
.Lfunc_end0:
.size _Z9fill_intsPii, .Lfunc_end0-_Z9fill_intsPii
.cfi_endproc
# -- End function
.globl _Z22__device_stub__dotVecsPiS_S_ # -- Begin function _Z22__device_stub__dotVecsPiS_S_
.p2align 4, 0x90
.type _Z22__device_stub__dotVecsPiS_S_,@function
_Z22__device_stub__dotVecsPiS_S_: # @_Z22__device_stub__dotVecsPiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z7dotVecsPiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z22__device_stub__dotVecsPiS_S_, .Lfunc_end1-_Z22__device_stub__dotVecsPiS_S_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $128, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
leaq 16(%rsp), %rdi
movl $4096, %esi # imm = 0x1000
callq hipMalloc
leaq 8(%rsp), %rdi
movl $4096, %esi # imm = 0x1000
callq hipMalloc
movq %rsp, %rdi
movl $4, %esi
callq hipMalloc
movl $4096, %edi # imm = 0x1000
callq malloc
movq %rax, %rbx
xorl %eax, %eax
.p2align 4, 0x90
.LBB2_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
movl %eax, (%rbx,%rax,4)
incq %rax
cmpq $1024, %rax # imm = 0x400
jne .LBB2_1
# %bb.2: # %_Z9fill_intsPii.exit
movl $4096, %edi # imm = 0x1000
callq malloc
movq %rax, %r14
xorl %eax, %eax
.p2align 4, 0x90
.LBB2_3: # %.lr.ph.i16
# =>This Inner Loop Header: Depth=1
movl %eax, (%r14,%rax,4)
incq %rax
cmpq $1024, %rax # imm = 0x400
jne .LBB2_3
# %bb.4: # %_Z9fill_intsPii.exit20
movl $4, %edi
callq malloc
movq %rax, %r15
movq 16(%rsp), %rdi
movl $4096, %edx # imm = 0x1000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movl $4096, %edx # imm = 0x1000
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movq (%rsp), %rdi
movl $4, %edx
xorl %esi, %esi
callq hipMemset
movabsq $4294967298, %rdi # imm = 0x100000002
leaq 510(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_6
# %bb.5:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z7dotVecsPiS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_6:
movq (%rsp), %rsi
movl $4, %edx
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
movl (%r15), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
xorl %eax, %eax
addq $128, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7dotVecsPiS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z7dotVecsPiS_S_,@object # @_Z7dotVecsPiS_S_
.section .rodata,"a",@progbits
.globl _Z7dotVecsPiS_S_
.p2align 3, 0x0
_Z7dotVecsPiS_S_:
.quad _Z22__device_stub__dotVecsPiS_S_
.size _Z7dotVecsPiS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%d\n"
.size .L.str, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z7dotVecsPiS_S_"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__dotVecsPiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z7dotVecsPiS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001b0530_00000000-6_ex3_3.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z9fill_intsPii
.type _Z9fill_intsPii, @function
_Z9fill_intsPii:
.LFB2057:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L3
movslq %esi, %rsi
movl $0, %eax
.L5:
movl %eax, (%rdi,%rax,4)
addq $1, %rax
cmpq %rsi, %rax
jne .L5
.L3:
ret
.cfi_endproc
.LFE2057:
.size _Z9fill_intsPii, .-_Z9fill_intsPii
.globl _Z30__device_stub__Z7dotVecsPiS_S_PiS_S_
.type _Z30__device_stub__Z7dotVecsPiS_S_PiS_S_, @function
_Z30__device_stub__Z7dotVecsPiS_S_PiS_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z7dotVecsPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z30__device_stub__Z7dotVecsPiS_S_PiS_S_, .-_Z30__device_stub__Z7dotVecsPiS_S_PiS_S_
.globl _Z7dotVecsPiS_S_
.type _Z7dotVecsPiS_S_, @function
_Z7dotVecsPiS_S_:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z7dotVecsPiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z7dotVecsPiS_S_, .-_Z7dotVecsPiS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%d\n"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $64, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rdi
movl $4096, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $4096, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movl $4096, %edi
call malloc@PLT
movq %rax, %r12
movl $1024, %esi
movq %rax, %rdi
call _Z9fill_intsPii
movl $4096, %edi
call malloc@PLT
movq %rax, %rbp
movl $1024, %esi
movq %rax, %rdi
call _Z9fill_intsPii
movl $4, %edi
call malloc@PLT
movq %rax, %rbx
movl $1, %ecx
movl $4096, %edx
movq %r12, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $4096, %edx
movq %rbp, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $4, %edx
movl $0, %esi
movq 24(%rsp), %rdi
call cudaMemset@PLT
movl $512, 44(%rsp)
movl $1, 48(%rsp)
movl $2, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L19
.L16:
movl $2, %ecx
movl $4, %edx
movq 24(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
movl (%rbx), %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq %r12, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z30__device_stub__Z7dotVecsPiS_S_PiS_S_
jmp .L16
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z7dotVecsPiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z7dotVecsPiS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "ex3_3.hip"
.globl _Z9fill_intsPii # -- Begin function _Z9fill_intsPii
.p2align 4, 0x90
.type _Z9fill_intsPii,@function
_Z9fill_intsPii: # @_Z9fill_intsPii
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB0_3
# %bb.1: # %.lr.ph.preheader
movl %esi, %eax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB0_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl %ecx, (%rdi,%rcx,4)
incq %rcx
cmpq %rcx, %rax
jne .LBB0_2
.LBB0_3: # %._crit_edge
retq
.Lfunc_end0:
.size _Z9fill_intsPii, .Lfunc_end0-_Z9fill_intsPii
.cfi_endproc
# -- End function
.globl _Z22__device_stub__dotVecsPiS_S_ # -- Begin function _Z22__device_stub__dotVecsPiS_S_
.p2align 4, 0x90
.type _Z22__device_stub__dotVecsPiS_S_,@function
_Z22__device_stub__dotVecsPiS_S_: # @_Z22__device_stub__dotVecsPiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z7dotVecsPiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z22__device_stub__dotVecsPiS_S_, .Lfunc_end1-_Z22__device_stub__dotVecsPiS_S_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $128, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
leaq 16(%rsp), %rdi
movl $4096, %esi # imm = 0x1000
callq hipMalloc
leaq 8(%rsp), %rdi
movl $4096, %esi # imm = 0x1000
callq hipMalloc
movq %rsp, %rdi
movl $4, %esi
callq hipMalloc
movl $4096, %edi # imm = 0x1000
callq malloc
movq %rax, %rbx
xorl %eax, %eax
.p2align 4, 0x90
.LBB2_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
movl %eax, (%rbx,%rax,4)
incq %rax
cmpq $1024, %rax # imm = 0x400
jne .LBB2_1
# %bb.2: # %_Z9fill_intsPii.exit
movl $4096, %edi # imm = 0x1000
callq malloc
movq %rax, %r14
xorl %eax, %eax
.p2align 4, 0x90
.LBB2_3: # %.lr.ph.i16
# =>This Inner Loop Header: Depth=1
movl %eax, (%r14,%rax,4)
incq %rax
cmpq $1024, %rax # imm = 0x400
jne .LBB2_3
# %bb.4: # %_Z9fill_intsPii.exit20
movl $4, %edi
callq malloc
movq %rax, %r15
movq 16(%rsp), %rdi
movl $4096, %edx # imm = 0x1000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movl $4096, %edx # imm = 0x1000
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movq (%rsp), %rdi
movl $4, %edx
xorl %esi, %esi
callq hipMemset
movabsq $4294967298, %rdi # imm = 0x100000002
leaq 510(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_6
# %bb.5:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z7dotVecsPiS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_6:
movq (%rsp), %rsi
movl $4, %edx
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
movl (%r15), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
xorl %eax, %eax
addq $128, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7dotVecsPiS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z7dotVecsPiS_S_,@object # @_Z7dotVecsPiS_S_
.section .rodata,"a",@progbits
.globl _Z7dotVecsPiS_S_
.p2align 3, 0x0
_Z7dotVecsPiS_S_:
.quad _Z22__device_stub__dotVecsPiS_S_
.size _Z7dotVecsPiS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%d\n"
.size .L.str, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z7dotVecsPiS_S_"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__dotVecsPiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z7dotVecsPiS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
__global__ void helloFromGPU() {
printf("Hello World from GPU (Thread #%d)!\n", threadIdx.x);
}
int main(void){
printf("Hello World from CPU!\n");
helloFromGPU<<<1,10>>>();
cudaDeviceSynchronize();
printf("Goodbye World from CPU!\n");
cudaDeviceReset();
return 0;
} | code for sm_80
Function : _Z12helloFromGPUv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fc800078e00ff */
/*0010*/ S2R R8, SR_TID.X ; /* 0x0000000000087919 */
/* 0x000e220000002100 */
/*0020*/ IADD3 R1, R1, -0x8, RZ ; /* 0xfffffff801017810 */
/* 0x000fe20007ffe0ff */
/*0030*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */
/* 0x000fe200078e00ff */
/*0040*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe20000000f00 */
/*0050*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */
/* 0x000fe200078e00ff */
/*0060*/ IADD3 R6, P0, R1, c[0x0][0x20], RZ ; /* 0x0000080001067a10 */
/* 0x000fe40007f1e0ff */
/*0070*/ LDC.64 R2, c[0x4][R0] ; /* 0x0100000000027b82 */
/* 0x0002a60000000a00 */
/*0080*/ IMAD.X R7, RZ, RZ, c[0x0][0x24], P0 ; /* 0x00000900ff077624 */
/* 0x000fe200000e06ff */
/*0090*/ STL [R1], R8 ; /* 0x0000000801007387 */
/* 0x0013e80000100800 */
/*00a0*/ LEPC R8 ; /* 0x000000000008734e */
/* 0x002fc60000000000 */
/*00b0*/ MOV R11, 0x120 ; /* 0x00000120000b7802 */
/* 0x000fe40000000f00 */
/*00c0*/ MOV R20, 0xa0 ; /* 0x000000a000147802 */
/* 0x000fc40000000f00 */
/*00d0*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*00e0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe40000000f00 */
/*00f0*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */
/* 0x000fc8000791e108 */
/*0100*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e2509 */
/*0110*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */
/* 0x004fea0003c00000 */
/*0120*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0130*/ BRA 0x130; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
__global__ void helloFromGPU() {
printf("Hello World from GPU (Thread #%d)!\n", threadIdx.x);
}
int main(void){
printf("Hello World from CPU!\n");
helloFromGPU<<<1,10>>>();
cudaDeviceSynchronize();
printf("Goodbye World from CPU!\n");
cudaDeviceReset();
return 0;
} | .file "tmpxft_00013cc2_00000000-6_hello_world_gpu_02.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z31__device_stub__Z12helloFromGPUvv
.type _Z31__device_stub__Z12helloFromGPUvv, @function
_Z31__device_stub__Z12helloFromGPUvv:
.LFB2082:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z12helloFromGPUv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z31__device_stub__Z12helloFromGPUvv, .-_Z31__device_stub__Z12helloFromGPUvv
.globl _Z12helloFromGPUv
.type _Z12helloFromGPUv, @function
_Z12helloFromGPUv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z12helloFromGPUvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z12helloFromGPUv, .-_Z12helloFromGPUv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Hello World from CPU!\n"
.LC1:
.string "Goodbye World from CPU!\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $10, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L12:
call cudaDeviceSynchronize@PLT
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call cudaDeviceReset@PLT
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
call _Z31__device_stub__Z12helloFromGPUvv
jmp .L12
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC2:
.string "_Z12helloFromGPUv"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z12helloFromGPUv(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
__global__ void helloFromGPU() {
printf("Hello World from GPU (Thread #%d)!\n", threadIdx.x);
}
int main(void){
printf("Hello World from CPU!\n");
helloFromGPU<<<1,10>>>();
cudaDeviceSynchronize();
printf("Goodbye World from CPU!\n");
cudaDeviceReset();
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void helloFromGPU() {
printf("Hello World from GPU (Thread #%d)!\n", threadIdx.x);
}
int main(void){
printf("Hello World from CPU!\n");
helloFromGPU<<<1,10>>>();
hipDeviceSynchronize();
printf("Goodbye World from CPU!\n");
hipDeviceReset();
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void helloFromGPU() {
printf("Hello World from GPU (Thread #%d)!\n", threadIdx.x);
}
int main(void){
printf("Hello World from CPU!\n");
helloFromGPU<<<1,10>>>();
hipDeviceSynchronize();
printf("Goodbye World from CPU!\n");
hipDeviceReset();
return 0;
} | .text
.file "hello_world_gpu_02.hip"
.globl _Z27__device_stub__helloFromGPUv # -- Begin function _Z27__device_stub__helloFromGPUv
.p2align 4, 0x90
.type _Z27__device_stub__helloFromGPUv,@function
_Z27__device_stub__helloFromGPUv: # @_Z27__device_stub__helloFromGPUv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z12helloFromGPUv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z27__device_stub__helloFromGPUv, .Lfunc_end0-_Z27__device_stub__helloFromGPUv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
movl $.Lstr, %edi
callq puts@PLT
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 9(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z12helloFromGPUv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
callq hipDeviceSynchronize
movl $.Lstr.1, %edi
callq puts@PLT
callq hipDeviceReset
xorl %eax, %eax
addq $56, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12helloFromGPUv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z12helloFromGPUv,@object # @_Z12helloFromGPUv
.section .rodata,"a",@progbits
.globl _Z12helloFromGPUv
.p2align 3, 0x0
_Z12helloFromGPUv:
.quad _Z27__device_stub__helloFromGPUv
.size _Z12helloFromGPUv, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z12helloFromGPUv"
.size .L__unnamed_1, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Hello World from CPU!"
.size .Lstr, 22
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Goodbye World from CPU!"
.size .Lstr.1, 24
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__helloFromGPUv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12helloFromGPUv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00013cc2_00000000-6_hello_world_gpu_02.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z31__device_stub__Z12helloFromGPUvv
.type _Z31__device_stub__Z12helloFromGPUvv, @function
_Z31__device_stub__Z12helloFromGPUvv:
.LFB2082:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z12helloFromGPUv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z31__device_stub__Z12helloFromGPUvv, .-_Z31__device_stub__Z12helloFromGPUvv
.globl _Z12helloFromGPUv
.type _Z12helloFromGPUv, @function
_Z12helloFromGPUv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z12helloFromGPUvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z12helloFromGPUv, .-_Z12helloFromGPUv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Hello World from CPU!\n"
.LC1:
.string "Goodbye World from CPU!\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $10, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L12:
call cudaDeviceSynchronize@PLT
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call cudaDeviceReset@PLT
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
call _Z31__device_stub__Z12helloFromGPUvv
jmp .L12
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC2:
.string "_Z12helloFromGPUv"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z12helloFromGPUv(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "hello_world_gpu_02.hip"
.globl _Z27__device_stub__helloFromGPUv # -- Begin function _Z27__device_stub__helloFromGPUv
.p2align 4, 0x90
.type _Z27__device_stub__helloFromGPUv,@function
_Z27__device_stub__helloFromGPUv: # @_Z27__device_stub__helloFromGPUv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z12helloFromGPUv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z27__device_stub__helloFromGPUv, .Lfunc_end0-_Z27__device_stub__helloFromGPUv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
movl $.Lstr, %edi
callq puts@PLT
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 9(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z12helloFromGPUv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
callq hipDeviceSynchronize
movl $.Lstr.1, %edi
callq puts@PLT
callq hipDeviceReset
xorl %eax, %eax
addq $56, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12helloFromGPUv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z12helloFromGPUv,@object # @_Z12helloFromGPUv
.section .rodata,"a",@progbits
.globl _Z12helloFromGPUv
.p2align 3, 0x0
_Z12helloFromGPUv:
.quad _Z27__device_stub__helloFromGPUv
.size _Z12helloFromGPUv, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z12helloFromGPUv"
.size .L__unnamed_1, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Hello World from CPU!"
.size .Lstr, 22
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Goodbye World from CPU!"
.size .Lstr.1, 24
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__helloFromGPUv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12helloFromGPUv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
__global__ void update_h(int nx, int ny, int nz, int nyz, int idx0, float *hx, float *hy, float *hz, float *ex, float *ey, float *ez) {
int tx = threadIdx.x;
int idx = blockDim.x*blockIdx.x + tx + idx0;
int i = idx/(nyz);
int j = (idx - i*nyz)/nz;
int k = idx%nz;
if( j>0 && k>0 ) hx[idx] -= 0.5*( ez[idx] - ez[idx-nz] - ey[idx] + ey[idx-1] );
if( i>0 && k>0 ) hy[idx] -= 0.5*( ex[idx] - ex[idx-1] - ez[idx] + ez[idx-nyz] );
if( i>0 && j>0 ) hz[idx] -= 0.5*( ey[idx] - ey[idx-nyz] - ex[idx] + ex[idx-nz] );
}
__global__ void update_e(int nx, int ny, int nz, int nyz, int idx0, float *hx, float *hy, float *hz, float *ex, float *ey, float *ez, float *cex, float *cey, float *cez) {
int tx = threadIdx.x;
int idx = blockDim.x*blockIdx.x + tx + idx0;
int i = idx/(nyz);
int j = (idx - i*nyz)/nz;
int k = idx%nz;
if( j<ny-1 && k<nz-1 ) ex[idx] += cex[idx]*( hz[idx+nz] - hz[idx] - hy[idx+1] + hy[idx] );
if( i<nx-1 && k<nz-1 ) ey[idx] += cey[idx]*( hx[idx+1] - hx[idx] - hz[idx+nyz] + hz[idx] );
if( i<nx-1 && j<ny-1 ) ez[idx] += cez[idx]*( hy[idx+nyz] - hy[idx] - hx[idx+nz] + hx[idx] );
}
__global__ void update_src(int nx, int ny, int nz, int nyz, float tn, float *f) {
int idx = threadIdx.x;
int ijk = (nx/2)*nyz + (ny/2)*nz + idx;
if( idx < nz ) f[ijk] += sin(0.1*tn);
}
__global__ void init_zero(int n, int idx0, float *f) {
int tx = threadIdx.x;
int idx = blockDim.x*blockIdx.x + tx + idx0;
if( idx < n ) f[idx] = 0;
}
int main() {
int i, n, nx, ny, nz, tn, tmax;
nx = 320;
ny = 480;
nz = 480;
n = nx*ny*nz;
tmax = 100000;
printf("Simple FDTD simulation\n", nx, ny, nz);
printf("Array size : %dx%dx%d\n", nx, ny, nz);
printf("Total used memory : %1.2f GB\n", n*4*9./(1024*1024*1024));
printf("Iteration : %d step\n", tmax);
// memory allocate
float *f, *cf;
f = (float *) calloc (n, sizeof(float));
cf = (float *) calloc (n, sizeof(float));
for( i=0; i<n; i++ ) cf[i] = 0.5;
float *hx_gpu, *hy_gpu, *hz_gpu;
float *ex_gpu, *ey_gpu, *ez_gpu;
float *cex_gpu, *cey_gpu, *cez_gpu;
cudaMalloc ( (void**) &hx_gpu, n*sizeof(float) );
cudaMalloc ( (void**) &hy_gpu, n*sizeof(float) );
cudaMalloc ( (void**) &hz_gpu, n*sizeof(float) );
cudaMalloc ( (void**) &ex_gpu, n*sizeof(float) );
cudaMalloc ( (void**) &ey_gpu, n*sizeof(float) );
cudaMalloc ( (void**) &ez_gpu, n*sizeof(float) );
cudaMalloc ( (void**) &cex_gpu, n*sizeof(float) );
cudaMalloc ( (void**) &cey_gpu, n*sizeof(float) );
cudaMalloc ( (void**) &cez_gpu, n*sizeof(float) );
cudaMemcpy ( cex_gpu, cf, n*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy ( cey_gpu, cf, n*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy ( cez_gpu, cf, n*sizeof(float), cudaMemcpyHostToDevice );
int ng = 6; // number of grid
int tpb = 256; // threads per block
int bpg = n/tpb/ng; // blocks per grid
for( i=0; i<ng; i++) {
init_zero <<<dim3(bpg),dim3(tpb)>>> (n, i*bpg*tpb, hx_gpu);
init_zero <<<dim3(bpg),dim3(tpb)>>> (n, i*bpg*tpb, hy_gpu);
init_zero <<<dim3(bpg),dim3(tpb)>>> (n, i*bpg*tpb, hz_gpu);
init_zero <<<dim3(bpg),dim3(tpb)>>> (n, i*bpg*tpb, ex_gpu);
init_zero <<<dim3(bpg),dim3(tpb)>>> (n, i*bpg*tpb, ey_gpu);
init_zero <<<dim3(bpg),dim3(tpb)>>> (n, i*bpg*tpb, ez_gpu);
}
// main loop
for( tn=0; tn<tmax; tn++ ) {
for( i=0; i<ng; i++) update_h <<<dim3(bpg),dim3(tpb)>>> (nx, ny, nz, ny*nz, i*bpg*tpb, hx_gpu, hy_gpu, hz_gpu, ex_gpu, ey_gpu, ez_gpu);
for( i=0; i<ng; i++) update_e <<<dim3(bpg),dim3(tpb)>>> (nx, ny, nz, ny*nz, i*bpg*tpb, hx_gpu, hy_gpu, hz_gpu, ex_gpu, ey_gpu, ez_gpu, cex_gpu, cey_gpu, cez_gpu);
update_src <<<dim3(1),dim3(512)>>> (nx, ny, nz, ny*nz, tn, ez_gpu);
}
cudaMemcpy( f, ez_gpu, n*sizeof(float), cudaMemcpyDeviceToHost );
printf("Complete.\n");
return 0;
} | .file "tmpxft_001a0ad3_00000000-6_012-row_major.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z42__device_stub__Z8update_hiiiiiPfS_S_S_S_S_iiiiiPfS_S_S_S_S_
.type _Z42__device_stub__Z8update_hiiiiiPfS_S_S_S_S_iiiiiPfS_S_S_S_S_, @function
_Z42__device_stub__Z8update_hiiiiiPfS_S_S_S_S_iiiiiPfS_S_S_S_S_:
.LFB2082:
.cfi_startproc
endbr64
subq $248, %rsp
.cfi_def_cfa_offset 256
movl %edi, 76(%rsp)
movl %esi, 72(%rsp)
movl %edx, 68(%rsp)
movl %ecx, 64(%rsp)
movl %r8d, 60(%rsp)
movq %r9, 48(%rsp)
movq 256(%rsp), %rax
movq %rax, 40(%rsp)
movq 264(%rsp), %rax
movq %rax, 32(%rsp)
movq 272(%rsp), %rax
movq %rax, 24(%rsp)
movq 280(%rsp), %rax
movq %rax, 16(%rsp)
movq 288(%rsp), %rax
movq %rax, 8(%rsp)
movq %fs:40, %rax
movq %rax, 232(%rsp)
xorl %eax, %eax
leaq 76(%rsp), %rax
movq %rax, 144(%rsp)
leaq 72(%rsp), %rax
movq %rax, 152(%rsp)
leaq 68(%rsp), %rax
movq %rax, 160(%rsp)
leaq 64(%rsp), %rax
movq %rax, 168(%rsp)
leaq 60(%rsp), %rax
movq %rax, 176(%rsp)
leaq 48(%rsp), %rax
movq %rax, 184(%rsp)
leaq 40(%rsp), %rax
movq %rax, 192(%rsp)
leaq 32(%rsp), %rax
movq %rax, 200(%rsp)
leaq 24(%rsp), %rax
movq %rax, 208(%rsp)
leaq 16(%rsp), %rax
movq %rax, 216(%rsp)
leaq 8(%rsp), %rax
movq %rax, 224(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $1, 104(%rsp)
movl $1, 108(%rsp)
movl $1, 112(%rsp)
movl $1, 116(%rsp)
leaq 88(%rsp), %rcx
leaq 80(%rsp), %rdx
leaq 108(%rsp), %rsi
leaq 96(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 232(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $248, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 88(%rsp)
.cfi_def_cfa_offset 264
pushq 88(%rsp)
.cfi_def_cfa_offset 272
leaq 160(%rsp), %r9
movq 124(%rsp), %rcx
movl 132(%rsp), %r8d
movq 112(%rsp), %rsi
movl 120(%rsp), %edx
leaq _Z8update_hiiiiiPfS_S_S_S_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 256
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z42__device_stub__Z8update_hiiiiiPfS_S_S_S_S_iiiiiPfS_S_S_S_S_, .-_Z42__device_stub__Z8update_hiiiiiPfS_S_S_S_S_iiiiiPfS_S_S_S_S_
.globl _Z8update_hiiiiiPfS_S_S_S_S_
.type _Z8update_hiiiiiPfS_S_S_S_S_, @function
_Z8update_hiiiiiPfS_S_S_S_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 56(%rsp)
.cfi_def_cfa_offset 32
pushq 56(%rsp)
.cfi_def_cfa_offset 40
pushq 56(%rsp)
.cfi_def_cfa_offset 48
pushq 56(%rsp)
.cfi_def_cfa_offset 56
pushq 56(%rsp)
.cfi_def_cfa_offset 64
call _Z42__device_stub__Z8update_hiiiiiPfS_S_S_S_S_iiiiiPfS_S_S_S_S_
addq $56, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z8update_hiiiiiPfS_S_S_S_S_, .-_Z8update_hiiiiiPfS_S_S_S_S_
.globl _Z48__device_stub__Z8update_eiiiiiPfS_S_S_S_S_S_S_S_iiiiiPfS_S_S_S_S_S_S_S_
.type _Z48__device_stub__Z8update_eiiiiiPfS_S_S_S_S_S_S_S_iiiiiPfS_S_S_S_S_S_S_S_, @function
_Z48__device_stub__Z8update_eiiiiiPfS_S_S_S_S_S_S_S_iiiiiPfS_S_S_S_S_S_S_S_:
.LFB2084:
.cfi_startproc
endbr64
subq $296, %rsp
.cfi_def_cfa_offset 304
movl %edi, 92(%rsp)
movl %esi, 88(%rsp)
movl %edx, 84(%rsp)
movl %ecx, 80(%rsp)
movl %r8d, 76(%rsp)
movq %r9, 64(%rsp)
movq 304(%rsp), %rax
movq %rax, 56(%rsp)
movq 312(%rsp), %rax
movq %rax, 48(%rsp)
movq 320(%rsp), %rax
movq %rax, 40(%rsp)
movq 328(%rsp), %rax
movq %rax, 32(%rsp)
movq 336(%rsp), %rax
movq %rax, 24(%rsp)
movq 344(%rsp), %rax
movq %rax, 16(%rsp)
movq 352(%rsp), %rax
movq %rax, 8(%rsp)
movq 360(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 280(%rsp)
xorl %eax, %eax
leaq 92(%rsp), %rax
movq %rax, 160(%rsp)
leaq 88(%rsp), %rax
movq %rax, 168(%rsp)
leaq 84(%rsp), %rax
movq %rax, 176(%rsp)
leaq 80(%rsp), %rax
movq %rax, 184(%rsp)
leaq 76(%rsp), %rax
movq %rax, 192(%rsp)
leaq 64(%rsp), %rax
movq %rax, 200(%rsp)
leaq 56(%rsp), %rax
movq %rax, 208(%rsp)
leaq 48(%rsp), %rax
movq %rax, 216(%rsp)
leaq 40(%rsp), %rax
movq %rax, 224(%rsp)
leaq 32(%rsp), %rax
movq %rax, 232(%rsp)
leaq 24(%rsp), %rax
movq %rax, 240(%rsp)
leaq 16(%rsp), %rax
movq %rax, 248(%rsp)
leaq 8(%rsp), %rax
movq %rax, 256(%rsp)
movq %rsp, %rax
movq %rax, 264(%rsp)
movl $1, 112(%rsp)
movl $1, 116(%rsp)
movl $1, 120(%rsp)
movl $1, 124(%rsp)
movl $1, 128(%rsp)
movl $1, 132(%rsp)
leaq 104(%rsp), %rcx
leaq 96(%rsp), %rdx
leaq 124(%rsp), %rsi
leaq 112(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 280(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $296, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 104(%rsp)
.cfi_def_cfa_offset 312
pushq 104(%rsp)
.cfi_def_cfa_offset 320
leaq 176(%rsp), %r9
movq 140(%rsp), %rcx
movl 148(%rsp), %r8d
movq 128(%rsp), %rsi
movl 136(%rsp), %edx
leaq _Z8update_eiiiiiPfS_S_S_S_S_S_S_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 304
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2084:
.size _Z48__device_stub__Z8update_eiiiiiPfS_S_S_S_S_S_S_S_iiiiiPfS_S_S_S_S_S_S_S_, .-_Z48__device_stub__Z8update_eiiiiiPfS_S_S_S_S_S_S_S_iiiiiPfS_S_S_S_S_S_S_S_
.globl _Z8update_eiiiiiPfS_S_S_S_S_S_S_S_
.type _Z8update_eiiiiiPfS_S_S_S_S_S_S_S_, @function
_Z8update_eiiiiiPfS_S_S_S_S_S_S_S_:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pushq 72(%rsp)
.cfi_def_cfa_offset 24
pushq 72(%rsp)
.cfi_def_cfa_offset 32
pushq 72(%rsp)
.cfi_def_cfa_offset 40
pushq 72(%rsp)
.cfi_def_cfa_offset 48
pushq 72(%rsp)
.cfi_def_cfa_offset 56
pushq 72(%rsp)
.cfi_def_cfa_offset 64
pushq 72(%rsp)
.cfi_def_cfa_offset 72
pushq 72(%rsp)
.cfi_def_cfa_offset 80
call _Z48__device_stub__Z8update_eiiiiiPfS_S_S_S_S_S_S_S_iiiiiPfS_S_S_S_S_S_S_S_
addq $72, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _Z8update_eiiiiiPfS_S_S_S_S_S_S_S_, .-_Z8update_eiiiiiPfS_S_S_S_S_S_S_S_
.globl _Z35__device_stub__Z10update_srciiiifPfiiiifPf
.type _Z35__device_stub__Z10update_srciiiifPfiiiifPf, @function
_Z35__device_stub__Z10update_srciiiifPfiiiifPf:
.LFB2086:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movl %edx, 20(%rsp)
movl %ecx, 16(%rsp)
movss %xmm0, 12(%rsp)
movq %r8, (%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 20(%rsp), %rax
movq %rax, 112(%rsp)
leaq 16(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
movq %rsp, %rax
movq %rax, 136(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 184
pushq 40(%rsp)
.cfi_def_cfa_offset 192
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10update_srciiiifPf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2086:
.size _Z35__device_stub__Z10update_srciiiifPfiiiifPf, .-_Z35__device_stub__Z10update_srciiiifPfiiiifPf
.globl _Z10update_srciiiifPf
.type _Z10update_srciiiifPf, @function
_Z10update_srciiiifPf:
.LFB2087:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z10update_srciiiifPfiiiifPf
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _Z10update_srciiiifPf, .-_Z10update_srciiiifPf
.globl _Z30__device_stub__Z9init_zeroiiPfiiPf
.type _Z30__device_stub__Z9init_zeroiiPfiiPf, @function
_Z30__device_stub__Z9init_zeroiiPfiiPf:
.LFB2088:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movq %rdx, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z9init_zeroiiPf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2088:
.size _Z30__device_stub__Z9init_zeroiiPfiiPf, .-_Z30__device_stub__Z9init_zeroiiPfiiPf
.globl _Z9init_zeroiiPf
.type _Z9init_zeroiiPf, @function
_Z9init_zeroiiPf:
.LFB2089:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z9init_zeroiiPfiiPf
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2089:
.size _Z9init_zeroiiPf, .-_Z9init_zeroiiPf
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Simple FDTD simulation\n"
.LC1:
.string "Array size : %dx%dx%d\n"
.LC3:
.string "Total used memory : %1.2f GB\n"
.LC4:
.string "Iteration : %d step\n"
.LC6:
.string "Complete.\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $112, %rsp
.cfi_def_cfa_offset 144
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
movl $480, %r8d
movl $480, %ecx
movl $320, %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
movl $480, %r8d
movl $480, %ecx
movl $320, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movsd .LC2(%rip), %xmm0
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $100000, %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $4, %esi
movl $73728000, %edi
call calloc@PLT
movq %rax, %r12
movl $4, %esi
movl $73728000, %edi
call calloc@PLT
movq %rax, %rbx
leaq 294912000(%rax), %rdx
movss .LC5(%rip), %xmm0
.L36:
movss %xmm0, (%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L36
leaq 8(%rsp), %rdi
movl $294912000, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $294912000, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $294912000, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $294912000, %esi
call cudaMalloc@PLT
leaq 40(%rsp), %rdi
movl $294912000, %esi
call cudaMalloc@PLT
leaq 48(%rsp), %rdi
movl $294912000, %esi
call cudaMalloc@PLT
leaq 56(%rsp), %rdi
movl $294912000, %esi
call cudaMalloc@PLT
leaq 64(%rsp), %rdi
movl $294912000, %esi
call cudaMalloc@PLT
leaq 72(%rsp), %rdi
movl $294912000, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $294912000, %edx
movq %rbx, %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $294912000, %edx
movq %rbx, %rsi
movq 64(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $294912000, %edx
movq %rbx, %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
movl $0, %ebx
jmp .L43
.L58:
movq 8(%rsp), %rdx
movl %ebx, %esi
movl $73728000, %edi
call _Z30__device_stub__Z9init_zeroiiPfiiPf
jmp .L37
.L59:
movq 16(%rsp), %rdx
movl %ebx, %esi
movl $73728000, %edi
call _Z30__device_stub__Z9init_zeroiiPfiiPf
jmp .L38
.L60:
movq 24(%rsp), %rdx
movl %ebx, %esi
movl $73728000, %edi
call _Z30__device_stub__Z9init_zeroiiPfiiPf
jmp .L39
.L61:
movq 32(%rsp), %rdx
movl %ebx, %esi
movl $73728000, %edi
call _Z30__device_stub__Z9init_zeroiiPfiiPf
jmp .L40
.L62:
movq 40(%rsp), %rdx
movl %ebx, %esi
movl $73728000, %edi
call _Z30__device_stub__Z9init_zeroiiPfiiPf
jmp .L41
.L42:
addl $12288000, %ebx
cmpl $73728000, %ebx
je .L57
.L43:
movl $256, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $48000, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 92(%rsp), %rdx
movl $1, %ecx
movq 80(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L58
.L37:
movl $256, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $48000, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 92(%rsp), %rdx
movl $1, %ecx
movq 80(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L59
.L38:
movl $256, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $48000, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 92(%rsp), %rdx
movl $1, %ecx
movq 80(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L60
.L39:
movl $256, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $48000, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 92(%rsp), %rdx
movl $1, %ecx
movq 80(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L61
.L40:
movl $256, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $48000, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 92(%rsp), %rdx
movl $1, %ecx
movq 80(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L62
.L41:
movl $256, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $48000, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 92(%rsp), %rdx
movl $1, %ecx
movq 80(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L42
movq 48(%rsp), %rdx
movl %ebx, %esi
movl $73728000, %edi
call _Z30__device_stub__Z9init_zeroiiPfiiPf
jmp .L42
.L57:
movl $0, %ebp
jmp .L44
.L45:
addl $12288000, %ebx
cmpl $73728000, %ebx
je .L63
.L46:
movl $256, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $48000, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 92(%rsp), %rdx
movl $1, %ecx
movq 80(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L45
subq $8, %rsp
.cfi_def_cfa_offset 152
pushq 56(%rsp)
.cfi_def_cfa_offset 160
pushq 56(%rsp)
.cfi_def_cfa_offset 168
pushq 56(%rsp)
.cfi_def_cfa_offset 176
pushq 56(%rsp)
.cfi_def_cfa_offset 184
pushq 56(%rsp)
.cfi_def_cfa_offset 192
movq 56(%rsp), %r9
movl %ebx, %r8d
movl $230400, %ecx
movl $480, %edx
movl $480, %esi
movl $320, %edi
call _Z42__device_stub__Z8update_hiiiiiPfS_S_S_S_S_iiiiiPfS_S_S_S_S_
addq $48, %rsp
.cfi_def_cfa_offset 144
jmp .L45
.L63:
movl $0, %ebx
jmp .L48
.L47:
addl $12288000, %ebx
cmpl $73728000, %ebx
je .L64
.L48:
movl $256, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $48000, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 92(%rsp), %rdx
movl $1, %ecx
movq 80(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L47
pushq 72(%rsp)
.cfi_def_cfa_offset 152
pushq 72(%rsp)
.cfi_def_cfa_offset 160
pushq 72(%rsp)
.cfi_def_cfa_offset 168
pushq 72(%rsp)
.cfi_def_cfa_offset 176
pushq 72(%rsp)
.cfi_def_cfa_offset 184
pushq 72(%rsp)
.cfi_def_cfa_offset 192
pushq 72(%rsp)
.cfi_def_cfa_offset 200
pushq 72(%rsp)
.cfi_def_cfa_offset 208
movq 72(%rsp), %r9
movl %ebx, %r8d
movl $230400, %ecx
movl $480, %edx
movl $480, %esi
movl $320, %edi
call _Z48__device_stub__Z8update_eiiiiiPfS_S_S_S_S_S_S_S_iiiiiPfS_S_S_S_S_S_S_S_
addq $64, %rsp
.cfi_def_cfa_offset 144
jmp .L47
.L64:
movl $512, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 92(%rsp), %rdx
movl $1, %ecx
movq 80(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L65
.L49:
addl $1, %ebp
cmpl $100000, %ebp
je .L50
.L44:
movl $0, %ebx
jmp .L46
.L65:
pxor %xmm0, %xmm0
cvtsi2ssl %ebp, %xmm0
movq 48(%rsp), %r8
movl $230400, %ecx
movl $480, %edx
movl $480, %esi
movl $320, %edi
call _Z35__device_stub__Z10update_srciiiifPfiiiifPf
jmp .L49
.L50:
movl $2, %ecx
movl $294912000, %edx
movq 48(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L66
movl $0, %eax
addq $112, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L66:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC7:
.string "_Z9init_zeroiiPf"
.LC8:
.string "_Z10update_srciiiifPf"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC9:
.string "_Z8update_eiiiiiPfS_S_S_S_S_S_S_S_"
.section .rodata.str1.1
.LC10:
.string "_Z8update_hiiiiiPfS_S_S_S_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2091:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _Z9init_zeroiiPf(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z10update_srciiiifPf(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq _Z8update_eiiiiiPfS_S_S_S_S_S_S_S_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _Z8update_hiiiiiPfS_S_S_S_S_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2091:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC2:
.long 0
.long 1073989248
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC5:
.long 1056964608
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
__global__ void update_h(int nx, int ny, int nz, int nyz, int idx0, float *hx, float *hy, float *hz, float *ex, float *ey, float *ez) {
int tx = threadIdx.x;
int idx = blockDim.x*blockIdx.x + tx + idx0;
int i = idx/(nyz);
int j = (idx - i*nyz)/nz;
int k = idx%nz;
if( j>0 && k>0 ) hx[idx] -= 0.5*( ez[idx] - ez[idx-nz] - ey[idx] + ey[idx-1] );
if( i>0 && k>0 ) hy[idx] -= 0.5*( ex[idx] - ex[idx-1] - ez[idx] + ez[idx-nyz] );
if( i>0 && j>0 ) hz[idx] -= 0.5*( ey[idx] - ey[idx-nyz] - ex[idx] + ex[idx-nz] );
}
__global__ void update_e(int nx, int ny, int nz, int nyz, int idx0, float *hx, float *hy, float *hz, float *ex, float *ey, float *ez, float *cex, float *cey, float *cez) {
int tx = threadIdx.x;
int idx = blockDim.x*blockIdx.x + tx + idx0;
int i = idx/(nyz);
int j = (idx - i*nyz)/nz;
int k = idx%nz;
if( j<ny-1 && k<nz-1 ) ex[idx] += cex[idx]*( hz[idx+nz] - hz[idx] - hy[idx+1] + hy[idx] );
if( i<nx-1 && k<nz-1 ) ey[idx] += cey[idx]*( hx[idx+1] - hx[idx] - hz[idx+nyz] + hz[idx] );
if( i<nx-1 && j<ny-1 ) ez[idx] += cez[idx]*( hy[idx+nyz] - hy[idx] - hx[idx+nz] + hx[idx] );
}
__global__ void update_src(int nx, int ny, int nz, int nyz, float tn, float *f) {
int idx = threadIdx.x;
int ijk = (nx/2)*nyz + (ny/2)*nz + idx;
if( idx < nz ) f[ijk] += sin(0.1*tn);
}
__global__ void init_zero(int n, int idx0, float *f) {
int tx = threadIdx.x;
int idx = blockDim.x*blockIdx.x + tx + idx0;
if( idx < n ) f[idx] = 0;
}
int main() {
int i, n, nx, ny, nz, tn, tmax;
nx = 320;
ny = 480;
nz = 480;
n = nx*ny*nz;
tmax = 100000;
printf("Simple FDTD simulation\n", nx, ny, nz);
printf("Array size : %dx%dx%d\n", nx, ny, nz);
printf("Total used memory : %1.2f GB\n", n*4*9./(1024*1024*1024));
printf("Iteration : %d step\n", tmax);
// memory allocate
float *f, *cf;
f = (float *) calloc (n, sizeof(float));
cf = (float *) calloc (n, sizeof(float));
for( i=0; i<n; i++ ) cf[i] = 0.5;
float *hx_gpu, *hy_gpu, *hz_gpu;
float *ex_gpu, *ey_gpu, *ez_gpu;
float *cex_gpu, *cey_gpu, *cez_gpu;
cudaMalloc ( (void**) &hx_gpu, n*sizeof(float) );
cudaMalloc ( (void**) &hy_gpu, n*sizeof(float) );
cudaMalloc ( (void**) &hz_gpu, n*sizeof(float) );
cudaMalloc ( (void**) &ex_gpu, n*sizeof(float) );
cudaMalloc ( (void**) &ey_gpu, n*sizeof(float) );
cudaMalloc ( (void**) &ez_gpu, n*sizeof(float) );
cudaMalloc ( (void**) &cex_gpu, n*sizeof(float) );
cudaMalloc ( (void**) &cey_gpu, n*sizeof(float) );
cudaMalloc ( (void**) &cez_gpu, n*sizeof(float) );
cudaMemcpy ( cex_gpu, cf, n*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy ( cey_gpu, cf, n*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy ( cez_gpu, cf, n*sizeof(float), cudaMemcpyHostToDevice );
int ng = 6; // number of grid
int tpb = 256; // threads per block
int bpg = n/tpb/ng; // blocks per grid
for( i=0; i<ng; i++) {
init_zero <<<dim3(bpg),dim3(tpb)>>> (n, i*bpg*tpb, hx_gpu);
init_zero <<<dim3(bpg),dim3(tpb)>>> (n, i*bpg*tpb, hy_gpu);
init_zero <<<dim3(bpg),dim3(tpb)>>> (n, i*bpg*tpb, hz_gpu);
init_zero <<<dim3(bpg),dim3(tpb)>>> (n, i*bpg*tpb, ex_gpu);
init_zero <<<dim3(bpg),dim3(tpb)>>> (n, i*bpg*tpb, ey_gpu);
init_zero <<<dim3(bpg),dim3(tpb)>>> (n, i*bpg*tpb, ez_gpu);
}
// main loop
for( tn=0; tn<tmax; tn++ ) {
for( i=0; i<ng; i++) update_h <<<dim3(bpg),dim3(tpb)>>> (nx, ny, nz, ny*nz, i*bpg*tpb, hx_gpu, hy_gpu, hz_gpu, ex_gpu, ey_gpu, ez_gpu);
for( i=0; i<ng; i++) update_e <<<dim3(bpg),dim3(tpb)>>> (nx, ny, nz, ny*nz, i*bpg*tpb, hx_gpu, hy_gpu, hz_gpu, ex_gpu, ey_gpu, ez_gpu, cex_gpu, cey_gpu, cez_gpu);
update_src <<<dim3(1),dim3(512)>>> (nx, ny, nz, ny*nz, tn, ez_gpu);
}
cudaMemcpy( f, ez_gpu, n*sizeof(float), cudaMemcpyDeviceToHost );
printf("Complete.\n");
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
__global__ void update_h(int nx, int ny, int nz, int nyz, int idx0, float *hx, float *hy, float *hz, float *ex, float *ey, float *ez) {
int tx = threadIdx.x;
int idx = blockDim.x*blockIdx.x + tx + idx0;
int i = idx/(nyz);
int j = (idx - i*nyz)/nz;
int k = idx%nz;
if( j>0 && k>0 ) hx[idx] -= 0.5*( ez[idx] - ez[idx-nz] - ey[idx] + ey[idx-1] );
if( i>0 && k>0 ) hy[idx] -= 0.5*( ex[idx] - ex[idx-1] - ez[idx] + ez[idx-nyz] );
if( i>0 && j>0 ) hz[idx] -= 0.5*( ey[idx] - ey[idx-nyz] - ex[idx] + ex[idx-nz] );
}
__global__ void update_e(int nx, int ny, int nz, int nyz, int idx0, float *hx, float *hy, float *hz, float *ex, float *ey, float *ez, float *cex, float *cey, float *cez) {
int tx = threadIdx.x;
int idx = blockDim.x*blockIdx.x + tx + idx0;
int i = idx/(nyz);
int j = (idx - i*nyz)/nz;
int k = idx%nz;
if( j<ny-1 && k<nz-1 ) ex[idx] += cex[idx]*( hz[idx+nz] - hz[idx] - hy[idx+1] + hy[idx] );
if( i<nx-1 && k<nz-1 ) ey[idx] += cey[idx]*( hx[idx+1] - hx[idx] - hz[idx+nyz] + hz[idx] );
if( i<nx-1 && j<ny-1 ) ez[idx] += cez[idx]*( hy[idx+nyz] - hy[idx] - hx[idx+nz] + hx[idx] );
}
__global__ void update_src(int nx, int ny, int nz, int nyz, float tn, float *f) {
int idx = threadIdx.x;
int ijk = (nx/2)*nyz + (ny/2)*nz + idx;
if( idx < nz ) f[ijk] += sin(0.1*tn);
}
__global__ void init_zero(int n, int idx0, float *f) {
int tx = threadIdx.x;
int idx = blockDim.x*blockIdx.x + tx + idx0;
if( idx < n ) f[idx] = 0;
}
int main() {
int i, n, nx, ny, nz, tn, tmax;
nx = 320;
ny = 480;
nz = 480;
n = nx*ny*nz;
tmax = 100000;
printf("Simple FDTD simulation\n", nx, ny, nz);
printf("Array size : %dx%dx%d\n", nx, ny, nz);
printf("Total used memory : %1.2f GB\n", n*4*9./(1024*1024*1024));
printf("Iteration : %d step\n", tmax);
// memory allocate
float *f, *cf;
f = (float *) calloc (n, sizeof(float));
cf = (float *) calloc (n, sizeof(float));
for( i=0; i<n; i++ ) cf[i] = 0.5;
float *hx_gpu, *hy_gpu, *hz_gpu;
float *ex_gpu, *ey_gpu, *ez_gpu;
float *cex_gpu, *cey_gpu, *cez_gpu;
hipMalloc ( (void**) &hx_gpu, n*sizeof(float) );
hipMalloc ( (void**) &hy_gpu, n*sizeof(float) );
hipMalloc ( (void**) &hz_gpu, n*sizeof(float) );
hipMalloc ( (void**) &ex_gpu, n*sizeof(float) );
hipMalloc ( (void**) &ey_gpu, n*sizeof(float) );
hipMalloc ( (void**) &ez_gpu, n*sizeof(float) );
hipMalloc ( (void**) &cex_gpu, n*sizeof(float) );
hipMalloc ( (void**) &cey_gpu, n*sizeof(float) );
hipMalloc ( (void**) &cez_gpu, n*sizeof(float) );
hipMemcpy ( cex_gpu, cf, n*sizeof(float), hipMemcpyHostToDevice );
hipMemcpy ( cey_gpu, cf, n*sizeof(float), hipMemcpyHostToDevice );
hipMemcpy ( cez_gpu, cf, n*sizeof(float), hipMemcpyHostToDevice );
int ng = 6; // number of grid
int tpb = 256; // threads per block
int bpg = n/tpb/ng; // blocks per grid
for( i=0; i<ng; i++) {
init_zero <<<dim3(bpg),dim3(tpb)>>> (n, i*bpg*tpb, hx_gpu);
init_zero <<<dim3(bpg),dim3(tpb)>>> (n, i*bpg*tpb, hy_gpu);
init_zero <<<dim3(bpg),dim3(tpb)>>> (n, i*bpg*tpb, hz_gpu);
init_zero <<<dim3(bpg),dim3(tpb)>>> (n, i*bpg*tpb, ex_gpu);
init_zero <<<dim3(bpg),dim3(tpb)>>> (n, i*bpg*tpb, ey_gpu);
init_zero <<<dim3(bpg),dim3(tpb)>>> (n, i*bpg*tpb, ez_gpu);
}
// main loop
for( tn=0; tn<tmax; tn++ ) {
for( i=0; i<ng; i++) update_h <<<dim3(bpg),dim3(tpb)>>> (nx, ny, nz, ny*nz, i*bpg*tpb, hx_gpu, hy_gpu, hz_gpu, ex_gpu, ey_gpu, ez_gpu);
for( i=0; i<ng; i++) update_e <<<dim3(bpg),dim3(tpb)>>> (nx, ny, nz, ny*nz, i*bpg*tpb, hx_gpu, hy_gpu, hz_gpu, ex_gpu, ey_gpu, ez_gpu, cex_gpu, cey_gpu, cez_gpu);
update_src <<<dim3(1),dim3(512)>>> (nx, ny, nz, ny*nz, tn, ez_gpu);
}
hipMemcpy( f, ez_gpu, n*sizeof(float), hipMemcpyDeviceToHost );
printf("Complete.\n");
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
__global__ void update_h(int nx, int ny, int nz, int nyz, int idx0, float *hx, float *hy, float *hz, float *ex, float *ey, float *ez) {
int tx = threadIdx.x;
int idx = blockDim.x*blockIdx.x + tx + idx0;
int i = idx/(nyz);
int j = (idx - i*nyz)/nz;
int k = idx%nz;
if( j>0 && k>0 ) hx[idx] -= 0.5*( ez[idx] - ez[idx-nz] - ey[idx] + ey[idx-1] );
if( i>0 && k>0 ) hy[idx] -= 0.5*( ex[idx] - ex[idx-1] - ez[idx] + ez[idx-nyz] );
if( i>0 && j>0 ) hz[idx] -= 0.5*( ey[idx] - ey[idx-nyz] - ex[idx] + ex[idx-nz] );
}
__global__ void update_e(int nx, int ny, int nz, int nyz, int idx0, float *hx, float *hy, float *hz, float *ex, float *ey, float *ez, float *cex, float *cey, float *cez) {
int tx = threadIdx.x;
int idx = blockDim.x*blockIdx.x + tx + idx0;
int i = idx/(nyz);
int j = (idx - i*nyz)/nz;
int k = idx%nz;
if( j<ny-1 && k<nz-1 ) ex[idx] += cex[idx]*( hz[idx+nz] - hz[idx] - hy[idx+1] + hy[idx] );
if( i<nx-1 && k<nz-1 ) ey[idx] += cey[idx]*( hx[idx+1] - hx[idx] - hz[idx+nyz] + hz[idx] );
if( i<nx-1 && j<ny-1 ) ez[idx] += cez[idx]*( hy[idx+nyz] - hy[idx] - hx[idx+nz] + hx[idx] );
}
__global__ void update_src(int nx, int ny, int nz, int nyz, float tn, float *f) {
int idx = threadIdx.x;
int ijk = (nx/2)*nyz + (ny/2)*nz + idx;
if( idx < nz ) f[ijk] += sin(0.1*tn);
}
__global__ void init_zero(int n, int idx0, float *f) {
int tx = threadIdx.x;
int idx = blockDim.x*blockIdx.x + tx + idx0;
if( idx < n ) f[idx] = 0;
}
int main() {
int i, n, nx, ny, nz, tn, tmax;
nx = 320;
ny = 480;
nz = 480;
n = nx*ny*nz;
tmax = 100000;
printf("Simple FDTD simulation\n", nx, ny, nz);
printf("Array size : %dx%dx%d\n", nx, ny, nz);
printf("Total used memory : %1.2f GB\n", n*4*9./(1024*1024*1024));
printf("Iteration : %d step\n", tmax);
// memory allocate
float *f, *cf;
f = (float *) calloc (n, sizeof(float));
cf = (float *) calloc (n, sizeof(float));
for( i=0; i<n; i++ ) cf[i] = 0.5;
float *hx_gpu, *hy_gpu, *hz_gpu;
float *ex_gpu, *ey_gpu, *ez_gpu;
float *cex_gpu, *cey_gpu, *cez_gpu;
hipMalloc ( (void**) &hx_gpu, n*sizeof(float) );
hipMalloc ( (void**) &hy_gpu, n*sizeof(float) );
hipMalloc ( (void**) &hz_gpu, n*sizeof(float) );
hipMalloc ( (void**) &ex_gpu, n*sizeof(float) );
hipMalloc ( (void**) &ey_gpu, n*sizeof(float) );
hipMalloc ( (void**) &ez_gpu, n*sizeof(float) );
hipMalloc ( (void**) &cex_gpu, n*sizeof(float) );
hipMalloc ( (void**) &cey_gpu, n*sizeof(float) );
hipMalloc ( (void**) &cez_gpu, n*sizeof(float) );
hipMemcpy ( cex_gpu, cf, n*sizeof(float), hipMemcpyHostToDevice );
hipMemcpy ( cey_gpu, cf, n*sizeof(float), hipMemcpyHostToDevice );
hipMemcpy ( cez_gpu, cf, n*sizeof(float), hipMemcpyHostToDevice );
int ng = 6; // number of grid
int tpb = 256; // threads per block
int bpg = n/tpb/ng; // blocks per grid
for( i=0; i<ng; i++) {
init_zero <<<dim3(bpg),dim3(tpb)>>> (n, i*bpg*tpb, hx_gpu);
init_zero <<<dim3(bpg),dim3(tpb)>>> (n, i*bpg*tpb, hy_gpu);
init_zero <<<dim3(bpg),dim3(tpb)>>> (n, i*bpg*tpb, hz_gpu);
init_zero <<<dim3(bpg),dim3(tpb)>>> (n, i*bpg*tpb, ex_gpu);
init_zero <<<dim3(bpg),dim3(tpb)>>> (n, i*bpg*tpb, ey_gpu);
init_zero <<<dim3(bpg),dim3(tpb)>>> (n, i*bpg*tpb, ez_gpu);
}
// main loop
for( tn=0; tn<tmax; tn++ ) {
for( i=0; i<ng; i++) update_h <<<dim3(bpg),dim3(tpb)>>> (nx, ny, nz, ny*nz, i*bpg*tpb, hx_gpu, hy_gpu, hz_gpu, ex_gpu, ey_gpu, ez_gpu);
for( i=0; i<ng; i++) update_e <<<dim3(bpg),dim3(tpb)>>> (nx, ny, nz, ny*nz, i*bpg*tpb, hx_gpu, hy_gpu, hz_gpu, ex_gpu, ey_gpu, ez_gpu, cex_gpu, cey_gpu, cez_gpu);
update_src <<<dim3(1),dim3(512)>>> (nx, ny, nz, ny*nz, tn, ez_gpu);
}
hipMemcpy( f, ez_gpu, n*sizeof(float), hipMemcpyDeviceToHost );
printf("Complete.\n");
return 0;
} | .text
.file "012-row_major.hip"
.globl _Z23__device_stub__update_hiiiiiPfS_S_S_S_S_ # -- Begin function _Z23__device_stub__update_hiiiiiPfS_S_S_S_S_
.p2align 4, 0x90
.type _Z23__device_stub__update_hiiiiiPfS_S_S_S_S_,@function
_Z23__device_stub__update_hiiiiiPfS_S_S_S_S_: # @_Z23__device_stub__update_hiiiiiPfS_S_S_S_S_
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movl %edi, 20(%rsp)
movl %esi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movq %r9, 72(%rsp)
leaq 20(%rsp), %rax
movq %rax, 80(%rsp)
leaq 16(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
leaq 72(%rsp), %rax
movq %rax, 120(%rsp)
leaq 176(%rsp), %rax
movq %rax, 128(%rsp)
leaq 184(%rsp), %rax
movq %rax, 136(%rsp)
leaq 192(%rsp), %rax
movq %rax, 144(%rsp)
leaq 200(%rsp), %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8update_hiiiiiPfS_S_S_S_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end0:
.size _Z23__device_stub__update_hiiiiiPfS_S_S_S_S_, .Lfunc_end0-_Z23__device_stub__update_hiiiiiPfS_S_S_S_S_
.cfi_endproc
# -- End function
.globl _Z23__device_stub__update_eiiiiiPfS_S_S_S_S_S_S_S_ # -- Begin function _Z23__device_stub__update_eiiiiiPfS_S_S_S_S_S_S_S_
.p2align 4, 0x90
.type _Z23__device_stub__update_eiiiiiPfS_S_S_S_S_S_S_S_,@function
_Z23__device_stub__update_eiiiiiPfS_S_S_S_S_S_S_S_: # @_Z23__device_stub__update_eiiiiiPfS_S_S_S_S_S_S_S_
.cfi_startproc
# %bb.0:
subq $200, %rsp
.cfi_def_cfa_offset 208
movl %edi, 20(%rsp)
movl %esi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movq %r9, 72(%rsp)
leaq 20(%rsp), %rax
movq %rax, 80(%rsp)
leaq 16(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
leaq 72(%rsp), %rax
movq %rax, 120(%rsp)
leaq 208(%rsp), %rax
movq %rax, 128(%rsp)
leaq 216(%rsp), %rax
movq %rax, 136(%rsp)
leaq 224(%rsp), %rax
movq %rax, 144(%rsp)
leaq 232(%rsp), %rax
movq %rax, 152(%rsp)
leaq 240(%rsp), %rax
movq %rax, 160(%rsp)
leaq 248(%rsp), %rax
movq %rax, 168(%rsp)
leaq 256(%rsp), %rax
movq %rax, 176(%rsp)
leaq 264(%rsp), %rax
movq %rax, 184(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8update_eiiiiiPfS_S_S_S_S_S_S_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $216, %rsp
.cfi_adjust_cfa_offset -216
retq
.Lfunc_end1:
.size _Z23__device_stub__update_eiiiiiPfS_S_S_S_S_S_S_S_, .Lfunc_end1-_Z23__device_stub__update_eiiiiiPfS_S_S_S_S_S_S_S_
.cfi_endproc
# -- End function
.globl _Z25__device_stub__update_srciiiifPf # -- Begin function _Z25__device_stub__update_srciiiifPf
.p2align 4, 0x90
.type _Z25__device_stub__update_srciiiifPf,@function
_Z25__device_stub__update_srciiiifPf: # @_Z25__device_stub__update_srciiiifPf
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 20(%rsp)
movl %esi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movss %xmm0, 4(%rsp)
movq %r8, 72(%rsp)
leaq 20(%rsp), %rax
movq %rax, 80(%rsp)
leaq 16(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
leaq 72(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10update_srciiiifPf, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end2:
.size _Z25__device_stub__update_srciiiifPf, .Lfunc_end2-_Z25__device_stub__update_srciiiifPf
.cfi_endproc
# -- End function
.globl _Z24__device_stub__init_zeroiiPf # -- Begin function _Z24__device_stub__init_zeroiiPf
.p2align 4, 0x90
.type _Z24__device_stub__init_zeroiiPf,@function
_Z24__device_stub__init_zeroiiPf: # @_Z24__device_stub__init_zeroiiPf
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movl %edi, 4(%rsp)
movl %esi, (%rsp)
movq %rdx, 56(%rsp)
leaq 4(%rsp), %rax
movq %rax, 64(%rsp)
movq %rsp, %rax
movq %rax, 72(%rsp)
leaq 56(%rsp), %rax
movq %rax, 80(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z9init_zeroiiPf, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end3:
.size _Z24__device_stub__init_zeroiiPf, .Lfunc_end3-_Z24__device_stub__init_zeroiiPf
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI4_0:
.quad 0x4003c68000000000 # double 2.471923828125
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $360, %rsp # imm = 0x168
.cfi_def_cfa_offset 416
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $.Lstr, %edi
callq puts@PLT
xorl %r14d, %r14d
movl $.L.str.1, %edi
movl $320, %esi # imm = 0x140
movl $480, %edx # imm = 0x1E0
movl $480, %ecx # imm = 0x1E0
xorl %eax, %eax
callq printf
movsd .LCPI4_0(%rip), %xmm0 # xmm0 = mem[0],zero
movl $.L.str.2, %edi
movb $1, %al
callq printf
movl $.L.str.3, %edi
movl $100000, %esi # imm = 0x186A0
xorl %eax, %eax
callq printf
movl $73728000, %edi # imm = 0x4650000
movl $4, %esi
callq calloc
movq %rax, 328(%rsp) # 8-byte Spill
movl $73728000, %edi # imm = 0x4650000
movl $4, %esi
callq calloc
movq %rax, %rbx
.p2align 4, 0x90
.LBB4_1: # =>This Inner Loop Header: Depth=1
movl $1056964608, (%rbx,%r14,4) # imm = 0x3F000000
incq %r14
cmpq $73728000, %r14 # imm = 0x4650000
jne .LBB4_1
# %bb.2:
movabsq $4294967552, %r14 # imm = 0x100000100
leaq 128(%rsp), %rdi
movl $294912000, %esi # imm = 0x11940000
callq hipMalloc
leaq 120(%rsp), %rdi
movl $294912000, %esi # imm = 0x11940000
callq hipMalloc
leaq 112(%rsp), %rdi
movl $294912000, %esi # imm = 0x11940000
callq hipMalloc
leaq 104(%rsp), %rdi
movl $294912000, %esi # imm = 0x11940000
callq hipMalloc
leaq 96(%rsp), %rdi
movl $294912000, %esi # imm = 0x11940000
callq hipMalloc
leaq 80(%rsp), %rdi
movl $294912000, %esi # imm = 0x11940000
callq hipMalloc
leaq 320(%rsp), %rdi
movl $294912000, %esi # imm = 0x11940000
callq hipMalloc
leaq 312(%rsp), %rdi
movl $294912000, %esi # imm = 0x11940000
callq hipMalloc
leaq 304(%rsp), %rdi
movl $294912000, %esi # imm = 0x11940000
callq hipMalloc
movq 320(%rsp), %rdi
movl $294912000, %edx # imm = 0x11940000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 312(%rsp), %rdi
movl $294912000, %edx # imm = 0x11940000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 304(%rsp), %rdi
movl $294912000, %edx # imm = 0x11940000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
xorl %r12d, %r12d
leaq 47744(%r14), %r15
leaq 56(%rsp), %r13
leaq 40(%rsp), %rbx
leaq 192(%rsp), %rbp
jmp .LBB4_3
.p2align 4, 0x90
.LBB4_15: # in Loop: Header=BB4_3 Depth=1
addl $12288000, %r12d # imm = 0xBB8000
cmpl $73728000, %r12d # imm = 0x4650000
je .LBB4_16
.LBB4_3: # =>This Inner Loop Header: Depth=1
movq %r15, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_5
# %bb.4: # in Loop: Header=BB4_3 Depth=1
movq 128(%rsp), %rax
movl $73728000, 8(%rsp) # imm = 0x4650000
movl %r12d, (%rsp)
movq %rax, 32(%rsp)
leaq 8(%rsp), %rax
movq %rax, 192(%rsp)
movq %rsp, %rax
movq %rax, 200(%rsp)
leaq 32(%rsp), %rax
movq %rax, 208(%rsp)
movq %r13, %rdi
movq %rbx, %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
movl $_Z9init_zeroiiPf, %edi
movq %rbp, %r9
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_5: # in Loop: Header=BB4_3 Depth=1
movq %r15, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_7
# %bb.6: # in Loop: Header=BB4_3 Depth=1
movq 120(%rsp), %rax
movl $73728000, 8(%rsp) # imm = 0x4650000
movl %r12d, (%rsp)
movq %rax, 32(%rsp)
leaq 8(%rsp), %rax
movq %rax, 192(%rsp)
movq %rsp, %rax
movq %rax, 200(%rsp)
leaq 32(%rsp), %rax
movq %rax, 208(%rsp)
movq %r13, %rdi
movq %rbx, %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
movl $_Z9init_zeroiiPf, %edi
movq %rbp, %r9
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_7: # in Loop: Header=BB4_3 Depth=1
movq %r15, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_9
# %bb.8: # in Loop: Header=BB4_3 Depth=1
movq 112(%rsp), %rax
movl $73728000, 8(%rsp) # imm = 0x4650000
movl %r12d, (%rsp)
movq %rax, 32(%rsp)
leaq 8(%rsp), %rax
movq %rax, 192(%rsp)
movq %rsp, %rax
movq %rax, 200(%rsp)
leaq 32(%rsp), %rax
movq %rax, 208(%rsp)
movq %r13, %rdi
movq %rbx, %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
movl $_Z9init_zeroiiPf, %edi
movq %rbp, %r9
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_9: # in Loop: Header=BB4_3 Depth=1
movq %r15, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_11
# %bb.10: # in Loop: Header=BB4_3 Depth=1
movq 104(%rsp), %rax
movl $73728000, 8(%rsp) # imm = 0x4650000
movl %r12d, (%rsp)
movq %rax, 32(%rsp)
leaq 8(%rsp), %rax
movq %rax, 192(%rsp)
movq %rsp, %rax
movq %rax, 200(%rsp)
leaq 32(%rsp), %rax
movq %rax, 208(%rsp)
movq %r13, %rdi
movq %rbx, %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
movl $_Z9init_zeroiiPf, %edi
movq %rbp, %r9
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_11: # in Loop: Header=BB4_3 Depth=1
movq %r15, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_13
# %bb.12: # in Loop: Header=BB4_3 Depth=1
movq 96(%rsp), %rax
movl $73728000, 8(%rsp) # imm = 0x4650000
movl %r12d, (%rsp)
movq %rax, 32(%rsp)
leaq 8(%rsp), %rax
movq %rax, 192(%rsp)
movq %rsp, %rax
movq %rax, 200(%rsp)
leaq 32(%rsp), %rax
movq %rax, 208(%rsp)
movq %r13, %rdi
movq %rbx, %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
movl $_Z9init_zeroiiPf, %edi
movq %rbp, %r9
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_13: # in Loop: Header=BB4_3 Depth=1
movq %r15, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_15
# %bb.14: # in Loop: Header=BB4_3 Depth=1
movq 80(%rsp), %rax
movl $73728000, 8(%rsp) # imm = 0x4650000
movl %r12d, (%rsp)
movq %rax, 32(%rsp)
leaq 8(%rsp), %rax
movq %rax, 192(%rsp)
movq %rsp, %rax
movq %rax, 200(%rsp)
leaq 32(%rsp), %rax
movq %rax, 208(%rsp)
movq %r13, %rdi
movq %rbx, %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
movl $_Z9init_zeroiiPf, %edi
movq %rbp, %r9
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB4_15
.LBB4_16: # %.preheader255
xorl %r13d, %r13d
leaq 168(%rsp), %rbx
leaq 160(%rsp), %rbp
leaq 192(%rsp), %r12
leaq -255(%r14), %rax
movq %rax, 344(%rsp) # 8-byte Spill
leaq 256(%r14), %rax
movq %rax, 336(%rsp) # 8-byte Spill
jmp .LBB4_17
.p2align 4, 0x90
.LBB4_25: # in Loop: Header=BB4_17 Depth=1
movq 344(%rsp), %rdi # 8-byte Reload
movl $1, %esi
movq 336(%rsp), %rdx # 8-byte Reload
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
movq 352(%rsp), %r13 # 8-byte Reload
je .LBB4_26
.LBB4_27: # in Loop: Header=BB4_17 Depth=1
incl %r13d
cmpl $100000, %r13d # imm = 0x186A0
je .LBB4_28
.LBB4_17: # %.preheader254
# =>This Loop Header: Depth=1
# Child Loop BB4_18 Depth 2
# Child Loop BB4_22 Depth 2
movq %r13, 352(%rsp) # 8-byte Spill
xorl %r13d, %r13d
jmp .LBB4_18
.p2align 4, 0x90
.LBB4_20: # in Loop: Header=BB4_18 Depth=2
addl $12288000, %r13d # imm = 0xBB8000
cmpl $73728000, %r13d # imm = 0x4650000
je .LBB4_21
.LBB4_18: # Parent Loop BB4_17 Depth=1
# => This Inner Loop Header: Depth=2
movq %r15, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_20
# %bb.19: # in Loop: Header=BB4_18 Depth=2
movq 128(%rsp), %rax
movq %rax, 32(%rsp)
movq 120(%rsp), %rax
movq %rax, 24(%rsp)
movq 112(%rsp), %rax
movq %rax, 16(%rsp)
movq 104(%rsp), %rax
movq %rax, 8(%rsp)
movq 96(%rsp), %rax
movq %rax, (%rsp)
movq 80(%rsp), %rax
movq %rax, 88(%rsp)
movl $320, 152(%rsp) # imm = 0x140
movl $480, 144(%rsp) # imm = 0x1E0
movl $480, 136(%rsp) # imm = 0x1E0
movl $230400, 76(%rsp) # imm = 0x38400
movl %r13d, 72(%rsp)
leaq 152(%rsp), %rax
movq %rax, 192(%rsp)
leaq 144(%rsp), %rax
movq %rax, 200(%rsp)
leaq 136(%rsp), %rax
movq %rax, 208(%rsp)
leaq 76(%rsp), %rax
movq %rax, 216(%rsp)
leaq 72(%rsp), %rax
movq %rax, 224(%rsp)
leaq 32(%rsp), %rax
movq %rax, 232(%rsp)
leaq 24(%rsp), %rax
movq %rax, 240(%rsp)
leaq 16(%rsp), %rax
movq %rax, 248(%rsp)
leaq 8(%rsp), %rax
movq %rax, 256(%rsp)
movq %rsp, %rax
movq %rax, 264(%rsp)
leaq 88(%rsp), %rax
movq %rax, 272(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
movq %rbx, %rdx
movq %rbp, %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
movl $_Z8update_hiiiiiPfS_S_S_S_S_, %edi
movq %r12, %r9
pushq 160(%rsp)
.cfi_adjust_cfa_offset 8
pushq 176(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB4_20
.p2align 4, 0x90
.LBB4_21: # %.preheader.preheader
# in Loop: Header=BB4_17 Depth=1
xorl %r13d, %r13d
jmp .LBB4_22
.p2align 4, 0x90
.LBB4_24: # in Loop: Header=BB4_22 Depth=2
addl $12288000, %r13d # imm = 0xBB8000
cmpl $73728000, %r13d # imm = 0x4650000
je .LBB4_25
.LBB4_22: # %.preheader
# Parent Loop BB4_17 Depth=1
# => This Inner Loop Header: Depth=2
movq %r15, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_24
# %bb.23: # in Loop: Header=BB4_22 Depth=2
movq 128(%rsp), %rax
movq %rax, 32(%rsp)
movq 120(%rsp), %rax
movq %rax, 24(%rsp)
movq 112(%rsp), %rax
movq %rax, 16(%rsp)
movq 104(%rsp), %rax
movq %rax, 8(%rsp)
movq 96(%rsp), %rax
movq %rax, (%rsp)
movq 80(%rsp), %rax
movq %rax, 88(%rsp)
movq 320(%rsp), %rax
movq %rax, 168(%rsp)
movq 312(%rsp), %rax
movq %rax, 160(%rsp)
movq 304(%rsp), %rax
movq %rax, 152(%rsp)
movl $320, 76(%rsp) # imm = 0x140
movl $480, 72(%rsp) # imm = 0x1E0
movl $480, 188(%rsp) # imm = 0x1E0
movl $230400, 184(%rsp) # imm = 0x38400
movl %r13d, 180(%rsp)
leaq 76(%rsp), %rax
movq %rax, 192(%rsp)
leaq 72(%rsp), %rax
movq %rax, 200(%rsp)
leaq 188(%rsp), %rax
movq %rax, 208(%rsp)
leaq 184(%rsp), %rax
movq %rax, 216(%rsp)
leaq 180(%rsp), %rax
movq %rax, 224(%rsp)
leaq 32(%rsp), %rax
movq %rax, 232(%rsp)
leaq 24(%rsp), %rax
movq %rax, 240(%rsp)
leaq 16(%rsp), %rax
movq %rax, 248(%rsp)
leaq 8(%rsp), %rax
movq %rax, 256(%rsp)
movq %rsp, %rax
movq %rax, 264(%rsp)
leaq 88(%rsp), %rax
movq %rax, 272(%rsp)
movq %rbx, 280(%rsp)
movq %rbp, 288(%rsp)
leaq 152(%rsp), %rax
movq %rax, 296(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 144(%rsp), %rdx
leaq 136(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
movl $_Z8update_eiiiiiPfS_S_S_S_S_S_S_S_, %edi
movq %r12, %r9
pushq 136(%rsp)
.cfi_adjust_cfa_offset 8
pushq 152(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB4_24
.p2align 4, 0x90
.LBB4_26: # in Loop: Header=BB4_17 Depth=1
cvtsi2ss %r13d, %xmm0
movq 80(%rsp), %rax
movl $320, 8(%rsp) # imm = 0x140
movl $480, (%rsp) # imm = 0x1E0
movl $480, 88(%rsp) # imm = 0x1E0
movl $230400, 168(%rsp) # imm = 0x38400
movss %xmm0, 160(%rsp)
movq %rax, 32(%rsp)
leaq 8(%rsp), %rax
movq %rax, 192(%rsp)
movq %rsp, %rax
movq %rax, 200(%rsp)
leaq 88(%rsp), %rax
movq %rax, 208(%rsp)
movq %rbx, 216(%rsp)
movq %rbp, 224(%rsp)
leaq 32(%rsp), %rax
movq %rax, 232(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
movl $_Z10update_srciiiifPf, %edi
movq %r12, %r9
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB4_27
.LBB4_28:
movq 80(%rsp), %rsi
movl $294912000, %edx # imm = 0x11940000
movq 328(%rsp), %rdi # 8-byte Reload
movl $2, %ecx
callq hipMemcpy
movl $.Lstr.1, %edi
callq puts@PLT
xorl %eax, %eax
addq $360, %rsp # imm = 0x168
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size main, .Lfunc_end4-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB5_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB5_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8update_hiiiiiPfS_S_S_S_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8update_eiiiiiPfS_S_S_S_S_S_S_S_, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10update_srciiiifPf, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9init_zeroiiPf, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end5:
.size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB6_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB6_2:
retq
.Lfunc_end6:
.size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z8update_hiiiiiPfS_S_S_S_S_,@object # @_Z8update_hiiiiiPfS_S_S_S_S_
.section .rodata,"a",@progbits
.globl _Z8update_hiiiiiPfS_S_S_S_S_
.p2align 3, 0x0
_Z8update_hiiiiiPfS_S_S_S_S_:
.quad _Z23__device_stub__update_hiiiiiPfS_S_S_S_S_
.size _Z8update_hiiiiiPfS_S_S_S_S_, 8
.type _Z8update_eiiiiiPfS_S_S_S_S_S_S_S_,@object # @_Z8update_eiiiiiPfS_S_S_S_S_S_S_S_
.globl _Z8update_eiiiiiPfS_S_S_S_S_S_S_S_
.p2align 3, 0x0
_Z8update_eiiiiiPfS_S_S_S_S_S_S_S_:
.quad _Z23__device_stub__update_eiiiiiPfS_S_S_S_S_S_S_S_
.size _Z8update_eiiiiiPfS_S_S_S_S_S_S_S_, 8
.type _Z10update_srciiiifPf,@object # @_Z10update_srciiiifPf
.globl _Z10update_srciiiifPf
.p2align 3, 0x0
_Z10update_srciiiifPf:
.quad _Z25__device_stub__update_srciiiifPf
.size _Z10update_srciiiifPf, 8
.type _Z9init_zeroiiPf,@object # @_Z9init_zeroiiPf
.globl _Z9init_zeroiiPf
.p2align 3, 0x0
_Z9init_zeroiiPf:
.quad _Z24__device_stub__init_zeroiiPf
.size _Z9init_zeroiiPf, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "Array size : %dx%dx%d\n"
.size .L.str.1, 23
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Total used memory : %1.2f GB\n"
.size .L.str.2, 30
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Iteration : %d step\n"
.size .L.str.3, 21
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z8update_hiiiiiPfS_S_S_S_S_"
.size .L__unnamed_1, 29
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z8update_eiiiiiPfS_S_S_S_S_S_S_S_"
.size .L__unnamed_2, 35
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z10update_srciiiifPf"
.size .L__unnamed_3, 22
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "_Z9init_zeroiiPf"
.size .L__unnamed_4, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Simple FDTD simulation"
.size .Lstr, 23
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Complete."
.size .Lstr.1, 10
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__update_hiiiiiPfS_S_S_S_S_
.addrsig_sym _Z23__device_stub__update_eiiiiiPfS_S_S_S_S_S_S_S_
.addrsig_sym _Z25__device_stub__update_srciiiifPf
.addrsig_sym _Z24__device_stub__init_zeroiiPf
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8update_hiiiiiPfS_S_S_S_S_
.addrsig_sym _Z8update_eiiiiiPfS_S_S_S_S_S_S_S_
.addrsig_sym _Z10update_srciiiifPf
.addrsig_sym _Z9init_zeroiiPf
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | // Write a first sequential implementation (matmult gpu1()) of matrix multiplication on the
// GPU that uses only a single thread. It should work for all matrix sizes. Hints:
// – You need CUDA code to allocate memory on the GPU, transfer A and B to the
// GPU, transfer C back to the CPU, and free the allocated memory.
//
// Time your kernel for small matrix sizes and compare to the reference DGEMM on the CPU.
// matrix times matrix
// m represents the number of rows (the vertical length) of A and C,
// k represents the number of columns of A and the n. of rows of B,
// n represents the number of columns (the horizontal length) of B and C.
// ____k____ ____n____ ____n____
// | | | | | |
// m | A | X k | B | = m | C |
// | | | | | |
// --------- --------- ---------
__global__ void m1(int m, int n, int k, double *A, double *B, double *C) {
// for (int i = 0; i < m; i++) {
// for (int j = 0; j < n; j++) {
// C[i*n + j] = 0;
// }
// }
for (int i = 0; i < m; i++) {
for (int h = 0; h < k; h++){
for (int j = 0; j < n; j++) {
C[i*n + j] += A[i*k + h] * B[h*n + j];
}
}
}
}
extern "C" {
void matmult_gpu1(int m, int n, int k, double *A, double *B, double *C) {
double* d_A, * d_B, * d_C;
cudaSetDevice(2);
cudaMalloc((void**)&d_A, m*k * sizeof(double));
cudaMalloc((void**)&d_B, k*n * sizeof(double));
cudaMalloc((void**)&d_C, m*n * sizeof(double));
cudaMemcpy(d_A, A, m*k * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, k*n * sizeof(double), cudaMemcpyHostToDevice);
// Initialize the output matrix with zeroes.
cudaMemset(d_C, 0, m*n * sizeof(double));
m1<<<1,1>>>(m, n, k, d_A, d_B, d_C);
cudaDeviceSynchronize();
cudaMemcpy(C, d_C, m*n * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
}
} | code for sm_80
Function : _Z2m1iiiPdS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ IMAD.MOV.U32 R0, RZ, RZ, 0x1 ; /* 0x00000001ff007424 */
/* 0x000fca00078e00ff */
/*0020*/ ISETP.LE.AND P0, PT, R0, c[0x0][0x160], PT ; /* 0x0000580000007a0c */
/* 0x000fda0003f03270 */
/*0030*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0040*/ ISETP.LE.AND P0, PT, R0, c[0x0][0x168], PT ; /* 0x00005a0000007a0c */
/* 0x000fda0003f03270 */
/*0050*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0060*/ IMAD.MOV.U32 R0, RZ, RZ, 0x1 ; /* 0x00000001ff007424 */
/* 0x000fe200078e00ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0080*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff027624 */
/* 0x000fc600078e00ff */
/*0090*/ IADD3 R0, -R0, c[0x0][0x164], RZ ; /* 0x0000590000007a10 */
/* 0x000fc80007ffe1ff */
/*00a0*/ ISETP.GE.U32.AND P1, PT, R0, 0x3, PT ; /* 0x000000030000780c */
/* 0x000fe40003f26070 */
/*00b0*/ LOP3.LUT R0, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302007812 */
/* 0x000fe200078ec0ff */
/*00c0*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */
/* 0x000fc600078e00ff */
/*00d0*/ IADD3 R3, -R0, c[0x0][0x164], RZ ; /* 0x0000590000037a10 */
/* 0x000fe40007ffe1ff */
/*00e0*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff047624 */
/* 0x000fca00078e00ff */
/*00f0*/ ISETP.GE.AND P0, PT, R4, 0x1, PT ; /* 0x000000010400780c */
/* 0x000fda0003f06270 */
/*0100*/ @!P0 BRA 0xf00 ; /* 0x00000df000008947 */
/* 0x003fea0003800000 */
/*0110*/ IMAD R4, R2, c[0x0][0x164], RZ ; /* 0x0000590002047a24 */
/* 0x000fe400078e02ff */
/*0120*/ IMAD.MOV.U32 R5, RZ, RZ, RZ ; /* 0x000000ffff057224 */
/* 0x000fc800078e00ff */
/*0130*/ IMAD R10, R2, c[0x0][0x168], R5 ; /* 0x00005a00020a7a24 */
/* 0x000fe400078e0205 */
/*0140*/ IMAD R6, R5.reuse, c[0x0][0x164], RZ ; /* 0x0000590005067a24 */
/* 0x041fe200078e02ff */
/*0150*/ IADD3 R5, R5, 0x1, RZ ; /* 0x0000000105057810 */
/* 0x000fe20007ffe0ff */
/*0160*/ IMAD.MOV.U32 R7, RZ, RZ, 0x8 ; /* 0x00000008ff077424 */
/* 0x000fe400078e00ff */
/*0170*/ IMAD.MOV.U32 R9, RZ, RZ, RZ ; /* 0x000000ffff097224 */
/* 0x000fe200078e00ff */
/*0180*/ ISETP.GE.AND P2, PT, R5, c[0x0][0x168], PT ; /* 0x00005a0005007a0c */
/* 0x000fe20003f46270 */
/*0190*/ IMAD.WIDE R10, R10, R7, c[0x0][0x170] ; /* 0x00005c000a0a7625 */
/* 0x000fe200078e0207 */
/*01a0*/ @!P1 BRA 0xd60 ; /* 0x00000bb000009947 */
/* 0x002fea0003800000 */
/*01b0*/ ISETP.GT.AND P0, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fe20003f04270 */
/*01c0*/ IMAD.MOV.U32 R9, RZ, RZ, RZ ; /* 0x000000ffff097224 */
/* 0x000fc400078e00ff */
/*01d0*/ IMAD.MOV.U32 R22, RZ, RZ, c[0x0][0x180] ; /* 0x00006000ff167624 */
/* 0x000fe400078e00ff */
/*01e0*/ IMAD.MOV.U32 R23, RZ, RZ, c[0x0][0x184] ; /* 0x00006100ff177624 */
/* 0x000fe400078e00ff */
/*01f0*/ IMAD.MOV.U32 R8, RZ, RZ, R3 ; /* 0x000000ffff087224 */
/* 0x000fe400078e0003 */
/*0200*/ IMAD.WIDE R12, R6, R7, c[0x0][0x178] ; /* 0x00005e00060c7625 */
/* 0x000fc800078e0207 */
/*0210*/ @!P0 BRA 0xb70 ; /* 0x0000095000008947 */
/* 0x000fea0003800000 */
/*0220*/ ISETP.GT.AND P3, PT, R8, 0xc, PT ; /* 0x0000000c0800780c */
/* 0x000fe40003f64270 */
/*0230*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*0240*/ @!P3 BRA 0x810 ; /* 0x000005c00000b947 */
/* 0x000fea0003800000 */
/*0250*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0260*/ IMAD.WIDE R20, R4, 0x8, R22 ; /* 0x0000000804147825 */
/* 0x001fe200078e0216 */
/*0270*/ LDG.E.64 R18, [R12.64] ; /* 0x000000040c127981 */
/* 0x000ea8000c1e1b00 */
/*0280*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000ea8000c1e1b00 */
/*0290*/ LDG.E.64 R14, [R20.64] ; /* 0x00000004140e7981 */
/* 0x000ea4000c1e1b00 */
/*02a0*/ DFMA R28, R18, R16, R14 ; /* 0x00000010121c722b */
/* 0x004044000000000e */
/*02b0*/ LDG.E.64 R14, [R20.64+0x8] ; /* 0x00000804140e7981 */
/* 0x001eaa000c1e1b00 */
/*02c0*/ STG.E.64 [R20.64], R28 ; /* 0x0000001c14007986 */
/* 0x0021e8000c101b04 */
/*02d0*/ LDG.E.64 R18, [R12.64+0x8] ; /* 0x000008040c127981 */
/* 0x000ea8000c1e1b00 */
/*02e0*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000ea4000c1e1b00 */
/*02f0*/ DFMA R26, R18, R16, R14 ; /* 0x00000010121a722b */
/* 0x004284000000000e */
/*0300*/ LDG.E.64 R14, [R20.64+0x10] ; /* 0x00001004140e7981 */
/* 0x002eea000c1e1b00 */
/*0310*/ STG.E.64 [R20.64+0x8], R26 ; /* 0x0000081a14007986 */
/* 0x0043e8000c101b04 */
/*0320*/ LDG.E.64 R18, [R12.64+0x10] ; /* 0x000010040c127981 */
/* 0x000ee8000c1e1b00 */
/*0330*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000ee4000c1e1b00 */
/*0340*/ DFMA R24, R18, R16, R14 ; /* 0x000000101218722b */
/* 0x0084c4000000000e */
/*0350*/ LDG.E.64 R14, [R20.64+0x18] ; /* 0x00001804140e7981 */
/* 0x004e2a000c1e1b00 */
/*0360*/ STG.E.64 [R20.64+0x10], R24 ; /* 0x0000101814007986 */
/* 0x0085e8000c101b04 */
/*0370*/ LDG.E.64 R18, [R12.64+0x18] ; /* 0x000018040c127981 */
/* 0x000e28000c1e1b00 */
/*0380*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000e24000c1e1b00 */
/*0390*/ DFMA R28, R18, R16, R14 ; /* 0x00000010121c722b */
/* 0x0010c4000000000e */
/*03a0*/ LDG.E.64 R14, [R20.64+0x20] ; /* 0x00002004140e7981 */
/* 0x001e6a000c1e1b00 */
/*03b0*/ STG.E.64 [R20.64+0x18], R28 ; /* 0x0000181c14007986 */
/* 0x0081e8000c101b04 */
/*03c0*/ LDG.E.64 R18, [R12.64+0x20] ; /* 0x000020040c127981 */
/* 0x000e68000c1e1b00 */
/*03d0*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000e64000c1e1b00 */
/*03e0*/ DFMA R26, R18, R16, R14 ; /* 0x00000010121a722b */
/* 0x0022c4000000000e */
/*03f0*/ LDG.E.64 R14, [R20.64+0x28] ; /* 0x00002804140e7981 */
/* 0x002eaa000c1e1b00 */
/*0400*/ STG.E.64 [R20.64+0x20], R26 ; /* 0x0000201a14007986 */
/* 0x0083e8000c101b04 */
/*0410*/ LDG.E.64 R18, [R12.64+0x28] ; /* 0x000028040c127981 */
/* 0x000ea8000c1e1b00 */
/*0420*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000ea4000c1e1b00 */
/*0430*/ DFMA R24, R18, R16, R14 ; /* 0x000000101218722b */
/* 0x0044c4000000000e */
/*0440*/ LDG.E.64 R14, [R20.64+0x30] ; /* 0x00003004140e7981 */
/* 0x004e2a000c1e1b00 */
/*0450*/ STG.E.64 [R20.64+0x28], R24 ; /* 0x0000281814007986 */
/* 0x0085e8000c101b04 */
/*0460*/ LDG.E.64 R18, [R12.64+0x30] ; /* 0x000030040c127981 */
/* 0x000e28000c1e1b00 */
/*0470*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000e24000c1e1b00 */
/*0480*/ DFMA R28, R18, R16, R14 ; /* 0x00000010121c722b */
/* 0x0010c4000000000e */
/*0490*/ LDG.E.64 R14, [R20.64+0x38] ; /* 0x00003804140e7981 */
/* 0x001e6a000c1e1b00 */
/*04a0*/ STG.E.64 [R20.64+0x30], R28 ; /* 0x0000301c14007986 */
/* 0x0081e8000c101b04 */
/*04b0*/ LDG.E.64 R18, [R12.64+0x38] ; /* 0x000038040c127981 */
/* 0x000e68000c1e1b00 */
/*04c0*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000e64000c1e1b00 */
/*04d0*/ DFMA R26, R18, R16, R14 ; /* 0x00000010121a722b */
/* 0x0022c4000000000e */
/*04e0*/ LDG.E.64 R14, [R20.64+0x40] ; /* 0x00004004140e7981 */
/* 0x002eaa000c1e1b00 */
/*04f0*/ STG.E.64 [R20.64+0x38], R26 ; /* 0x0000381a14007986 */
/* 0x0083e8000c101b04 */
/*0500*/ LDG.E.64 R18, [R12.64+0x40] ; /* 0x000040040c127981 */
/* 0x000ea8000c1e1b00 */
/*0510*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000ea4000c1e1b00 */
/*0520*/ DFMA R24, R18, R16, R14 ; /* 0x000000101218722b */
/* 0x0044c4000000000e */
/*0530*/ LDG.E.64 R14, [R20.64+0x48] ; /* 0x00004804140e7981 */
/* 0x004e2a000c1e1b00 */
/*0540*/ STG.E.64 [R20.64+0x40], R24 ; /* 0x0000401814007986 */
/* 0x0085e8000c101b04 */
/*0550*/ LDG.E.64 R18, [R12.64+0x48] ; /* 0x000048040c127981 */
/* 0x000e28000c1e1b00 */
/*0560*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000e24000c1e1b00 */
/*0570*/ DFMA R28, R18, R16, R14 ; /* 0x00000010121c722b */
/* 0x0010c4000000000e */
/*0580*/ LDG.E.64 R14, [R20.64+0x50] ; /* 0x00005004140e7981 */
/* 0x001e6a000c1e1b00 */
/*0590*/ STG.E.64 [R20.64+0x48], R28 ; /* 0x0000481c14007986 */
/* 0x0081e8000c101b04 */
/*05a0*/ LDG.E.64 R18, [R12.64+0x50] ; /* 0x000050040c127981 */
/* 0x000e68000c1e1b00 */
/*05b0*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000e64000c1e1b00 */
/*05c0*/ DFMA R26, R18, R16, R14 ; /* 0x00000010121a722b */
/* 0x0022c4000000000e */
/*05d0*/ LDG.E.64 R14, [R20.64+0x58] ; /* 0x00005804140e7981 */
/* 0x002eaa000c1e1b00 */
/*05e0*/ STG.E.64 [R20.64+0x50], R26 ; /* 0x0000501a14007986 */
/* 0x0083e8000c101b04 */
/*05f0*/ LDG.E.64 R18, [R12.64+0x58] ; /* 0x000058040c127981 */
/* 0x000ea8000c1e1b00 */
/*0600*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000ea4000c1e1b00 */
/*0610*/ DFMA R24, R18, R16, R14 ; /* 0x000000101218722b */
/* 0x0044c4000000000e */
/*0620*/ LDG.E.64 R14, [R20.64+0x60] ; /* 0x00006004140e7981 */
/* 0x004e2a000c1e1b00 */
/*0630*/ STG.E.64 [R20.64+0x58], R24 ; /* 0x0000581814007986 */
/* 0x0085e8000c101b04 */
/*0640*/ LDG.E.64 R18, [R12.64+0x60] ; /* 0x000060040c127981 */
/* 0x000e28000c1e1b00 */
/*0650*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000e24000c1e1b00 */
/*0660*/ DFMA R28, R18, R16, R14 ; /* 0x00000010121c722b */
/* 0x0010c4000000000e */
/*0670*/ LDG.E.64 R14, [R20.64+0x68] ; /* 0x00006804140e7981 */
/* 0x001e6a000c1e1b00 */
/*0680*/ STG.E.64 [R20.64+0x60], R28 ; /* 0x0000601c14007986 */
/* 0x0081e8000c101b04 */
/*0690*/ LDG.E.64 R18, [R12.64+0x68] ; /* 0x000068040c127981 */
/* 0x000e68000c1e1b00 */
/*06a0*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000e64000c1e1b00 */
/*06b0*/ DFMA R26, R18, R16, R14 ; /* 0x00000010121a722b */
/* 0x0022c4000000000e */
/*06c0*/ LDG.E.64 R14, [R20.64+0x70] ; /* 0x00007004140e7981 */
/* 0x002eaa000c1e1b00 */
/*06d0*/ STG.E.64 [R20.64+0x68], R26 ; /* 0x0000681a14007986 */
/* 0x0081e8000c101b04 */
/*06e0*/ LDG.E.64 R18, [R12.64+0x70] ; /* 0x000070040c127981 */
/* 0x000ea8000c1e1b00 */
/*06f0*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000ea2000c1e1b00 */
/*0700*/ IADD3 R8, R8, -0x10, RZ ; /* 0xfffffff008087810 */
/* 0x000fe20007ffe0ff */
/*0710*/ DFMA R24, R18, R16, R14 ; /* 0x000000101218722b */
/* 0x004284000000000e */
/*0720*/ LDG.E.64 R14, [R20.64+0x78] ; /* 0x00007804140e7981 */
/* 0x002eea000c1e1b00 */
/*0730*/ STG.E.64 [R20.64+0x70], R24 ; /* 0x0000701814007986 */
/* 0x0041e8000c101b04 */
/*0740*/ LDG.E.64 R18, [R12.64+0x78] ; /* 0x000078040c127981 */
/* 0x000ee8000c1e1b00 */
/*0750*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000ee2000c1e1b00 */
/*0760*/ ISETP.GT.AND P3, PT, R8, 0xc, PT ; /* 0x0000000c0800780c */
/* 0x000fc40003f64270 */
/*0770*/ IADD3 R22, P4, R22, 0x80, RZ ; /* 0x0000008016167810 */
/* 0x000fe40007f9e0ff */
/*0780*/ IADD3 R9, R9, 0x10, RZ ; /* 0x0000001009097810 */
/* 0x000fc60007ffe0ff */
/*0790*/ IMAD.X R23, RZ, RZ, R23, P4 ; /* 0x000000ffff177224 */
/* 0x000fe200020e0617 */
/*07a0*/ DFMA R14, R18, R16, R14 ; /* 0x00000010120e722b */
/* 0x0082a4000000000e */
/*07b0*/ IADD3 R16, P5, R12, 0x80, RZ ; /* 0x000000800c107810 */
/* 0x002fc80007fbe0ff */
/*07c0*/ IADD3.X R17, RZ, R13, RZ, P5, !PT ; /* 0x0000000dff117210 */
/* 0x000fe20002ffe4ff */
/*07d0*/ STG.E.64 [R20.64+0x78], R14 ; /* 0x0000780e14007986 */
/* 0x0041e2000c101b04 */
/*07e0*/ IMAD.MOV.U32 R12, RZ, RZ, R16 ; /* 0x000000ffff0c7224 */
/* 0x000fc600078e0010 */
/*07f0*/ IMAD.MOV.U32 R13, RZ, RZ, R17 ; /* 0x000000ffff0d7224 */
/* 0x000fe200078e0011 */
/*0800*/ @P3 BRA 0x260 ; /* 0xfffffa5000003947 */
/* 0x000fea000383ffff */
/*0810*/ ISETP.GT.AND P3, PT, R8, 0x4, PT ; /* 0x000000040800780c */
/* 0x000fda0003f64270 */
/*0820*/ @!P3 BRA 0xb50 ; /* 0x000003200000b947 */
/* 0x000fea0003800000 */
/*0830*/ IMAD.WIDE R14, R4, 0x8, R22 ; /* 0x00000008040e7825 */
/* 0x001fe200078e0216 */
/*0840*/ LDG.E.64 R20, [R12.64] ; /* 0x000000040c147981 */
/* 0x000ea8000c1e1b00 */
/*0850*/ LDG.E.64 R18, [R10.64] ; /* 0x000000040a127981 */
/* 0x000ea8000c1e1b00 */
/*0860*/ LDG.E.64 R16, [R14.64] ; /* 0x000000040e107981 */
/* 0x000ea4000c1e1b00 */
/*0870*/ DFMA R24, R20, R18, R16 ; /* 0x000000121418722b */
/* 0x0040440000000010 */
/*0880*/ LDG.E.64 R16, [R14.64+0x8] ; /* 0x000008040e107981 */
/* 0x001eaa000c1e1b00 */
/*0890*/ STG.E.64 [R14.64], R24 ; /* 0x000000180e007986 */
/* 0x0021e8000c101b04 */
/*08a0*/ LDG.E.64 R20, [R12.64+0x8] ; /* 0x000008040c147981 */
/* 0x000ea8000c1e1b00 */
/*08b0*/ LDG.E.64 R18, [R10.64] ; /* 0x000000040a127981 */
/* 0x000ea4000c1e1b00 */
/*08c0*/ DFMA R28, R20, R18, R16 ; /* 0x00000012141c722b */
/* 0x0042840000000010 */
/*08d0*/ LDG.E.64 R16, [R14.64+0x10] ; /* 0x000010040e107981 */
/* 0x002eea000c1e1b00 */
/*08e0*/ STG.E.64 [R14.64+0x8], R28 ; /* 0x0000081c0e007986 */
/* 0x0043e8000c101b04 */
/*08f0*/ LDG.E.64 R20, [R12.64+0x10] ; /* 0x000010040c147981 */
/* 0x000ee8000c1e1b00 */
/*0900*/ LDG.E.64 R18, [R10.64] ; /* 0x000000040a127981 */
/* 0x000ee4000c1e1b00 */
/*0910*/ DFMA R26, R20, R18, R16 ; /* 0x00000012141a722b */
/* 0x0084c40000000010 */
/*0920*/ LDG.E.64 R16, [R14.64+0x18] ; /* 0x000018040e107981 */
/* 0x004e2a000c1e1b00 */
/*0930*/ STG.E.64 [R14.64+0x10], R26 ; /* 0x0000101a0e007986 */
/* 0x0085e8000c101b04 */
/*0940*/ LDG.E.64 R20, [R12.64+0x18] ; /* 0x000018040c147981 */
/* 0x000e28000c1e1b00 */
/*0950*/ LDG.E.64 R18, [R10.64] ; /* 0x000000040a127981 */
/* 0x000e24000c1e1b00 */
/*0960*/ DFMA R24, R20, R18, R16 ; /* 0x000000121418722b */
/* 0x0010c40000000010 */
/*0970*/ LDG.E.64 R16, [R14.64+0x20] ; /* 0x000020040e107981 */
/* 0x001e6a000c1e1b00 */
/*0980*/ STG.E.64 [R14.64+0x18], R24 ; /* 0x000018180e007986 */
/* 0x0081e8000c101b04 */
/*0990*/ LDG.E.64 R20, [R12.64+0x20] ; /* 0x000020040c147981 */
/* 0x000e68000c1e1b00 */
/*09a0*/ LDG.E.64 R18, [R10.64] ; /* 0x000000040a127981 */
/* 0x000e64000c1e1b00 */
/*09b0*/ DFMA R28, R20, R18, R16 ; /* 0x00000012141c722b */
/* 0x0022c40000000010 */
/*09c0*/ LDG.E.64 R16, [R14.64+0x28] ; /* 0x000028040e107981 */
/* 0x002eaa000c1e1b00 */
/*09d0*/ STG.E.64 [R14.64+0x20], R28 ; /* 0x0000201c0e007986 */
/* 0x0083e8000c101b04 */
/*09e0*/ LDG.E.64 R20, [R12.64+0x28] ; /* 0x000028040c147981 */
/* 0x000ea8000c1e1b00 */
/*09f0*/ LDG.E.64 R18, [R10.64] ; /* 0x000000040a127981 */
/* 0x000ea4000c1e1b00 */
/*0a00*/ DFMA R26, R20, R18, R16 ; /* 0x00000012141a722b */
/* 0x0044c40000000010 */
/*0a10*/ LDG.E.64 R16, [R14.64+0x30] ; /* 0x000030040e107981 */
/* 0x004e2a000c1e1b00 */
/*0a20*/ STG.E.64 [R14.64+0x28], R26 ; /* 0x0000281a0e007986 */
/* 0x0083e8000c101b04 */
/*0a30*/ LDG.E.64 R18, [R12.64+0x30] ; /* 0x000030040c127981 */
/* 0x000e28000c1e1b00 */
/*0a40*/ LDG.E.64 R20, [R10.64] ; /* 0x000000040a147981 */
/* 0x000e24000c1e1b00 */
/*0a50*/ DFMA R24, R18, R20, R16 ; /* 0x000000141218722b */
/* 0x0010840000000010 */
/*0a60*/ LDG.E.64 R16, [R14.64+0x38] ; /* 0x000038040e107981 */
/* 0x001eea000c1e1b00 */
/*0a70*/ STG.E.64 [R14.64+0x30], R24 ; /* 0x000030180e007986 */
/* 0x0043e8000c101b04 */
/*0a80*/ LDG.E.64 R18, [R12.64+0x38] ; /* 0x000038040c127981 */
/* 0x000ee8000c1e1b00 */
/*0a90*/ LDG.E.64 R20, [R10.64] ; /* 0x000000040a147981 */
/* 0x000ee2000c1e1b00 */
/*0aa0*/ IADD3 R22, P3, R22, 0x40, RZ ; /* 0x0000004016167810 */
/* 0x000fc40007f7e0ff */
/*0ab0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0ac0*/ IADD3 R9, R9, 0x8, RZ ; /* 0x0000000809097810 */
/* 0x000fe20007ffe0ff */
/*0ad0*/ IMAD.X R23, RZ, RZ, R23, P3 ; /* 0x000000ffff177224 */
/* 0x000fe200018e0617 */
/*0ae0*/ IADD3 R8, R8, -0x8, RZ ; /* 0xfffffff808087810 */
/* 0x000fe20007ffe0ff */
/*0af0*/ DFMA R16, R18, R20, R16 ; /* 0x000000141210722b */
/* 0x0080a40000000010 */
/*0b00*/ IADD3 R18, P4, R12, 0x40, RZ ; /* 0x000000400c127810 */
/* 0x001fca0007f9e0ff */
/*0b10*/ STG.E.64 [R14.64+0x38], R16 ; /* 0x000038100e007986 */
/* 0x0043e2000c101b04 */
/*0b20*/ IMAD.X R19, RZ, RZ, R13, P4 ; /* 0x000000ffff137224 */
/* 0x000fe400020e060d */
/*0b30*/ IMAD.MOV.U32 R12, RZ, RZ, R18 ; /* 0x000000ffff0c7224 */
/* 0x000fe400078e0012 */
/*0b40*/ IMAD.MOV.U32 R13, RZ, RZ, R19 ; /* 0x000000ffff0d7224 */
/* 0x000fe400078e0013 */
/*0b50*/ ISETP.NE.OR P0, PT, R8, RZ, P0 ; /* 0x000000ff0800720c */
/* 0x000fda0000705670 */
/*0b60*/ @!P0 BRA 0xd60 ; /* 0x000001f000008947 */
/* 0x000fea0003800000 */
/*0b70*/ IMAD.WIDE R14, R4, 0x8, R22 ; /* 0x00000008040e7825 */
/* 0x003fe200078e0216 */
/*0b80*/ LDG.E.64 R20, [R12.64] ; /* 0x000000040c147981 */
/* 0x000ea8000c1e1b00 */
/*0b90*/ LDG.E.64 R18, [R10.64] ; /* 0x000000040a127981 */
/* 0x000ea8000c1e1b00 */
/*0ba0*/ LDG.E.64 R16, [R14.64] ; /* 0x000000040e107981 */
/* 0x000ea4000c1e1b00 */
/*0bb0*/ DFMA R28, R20, R18, R16 ; /* 0x00000012141c722b */
/* 0x0040440000000010 */
/*0bc0*/ LDG.E.64 R16, [R14.64+0x8] ; /* 0x000008040e107981 */
/* 0x001eaa000c1e1b00 */
/*0bd0*/ STG.E.64 [R14.64], R28 ; /* 0x0000001c0e007986 */
/* 0x0021e8000c101b04 */
/*0be0*/ LDG.E.64 R20, [R12.64+0x8] ; /* 0x000008040c147981 */
/* 0x000ea8000c1e1b00 */
/*0bf0*/ LDG.E.64 R18, [R10.64] ; /* 0x000000040a127981 */
/* 0x000ea4000c1e1b00 */
/*0c00*/ DFMA R26, R20, R18, R16 ; /* 0x00000012141a722b */
/* 0x0042840000000010 */
/*0c10*/ LDG.E.64 R16, [R14.64+0x10] ; /* 0x000010040e107981 */
/* 0x002eea000c1e1b00 */
/*0c20*/ STG.E.64 [R14.64+0x8], R26 ; /* 0x0000081a0e007986 */
/* 0x0041e8000c101b04 */
/*0c30*/ LDG.E.64 R20, [R12.64+0x10] ; /* 0x000010040c147981 */
/* 0x000ee8000c1e1b00 */
/*0c40*/ LDG.E.64 R18, [R10.64] ; /* 0x000000040a127981 */
/* 0x000ee2000c1e1b00 */
/*0c50*/ IADD3 R8, R8, -0x4, RZ ; /* 0xfffffffc08087810 */
/* 0x000fe20007ffe0ff */
/*0c60*/ DFMA R24, R20, R18, R16 ; /* 0x000000121418722b */
/* 0x0082840000000010 */
/*0c70*/ LDG.E.64 R16, [R14.64+0x18] ; /* 0x000018040e107981 */
/* 0x002eea000c1e1b00 */
/*0c80*/ STG.E.64 [R14.64+0x10], R24 ; /* 0x000010180e007986 */
/* 0x0041e8000c101b04 */
/*0c90*/ LDG.E.64 R20, [R12.64+0x18] ; /* 0x000018040c147981 */
/* 0x000ee8000c1e1b00 */
/*0ca0*/ LDG.E.64 R18, [R10.64] ; /* 0x000000040a127981 */
/* 0x000ee2000c1e1b00 */
/*0cb0*/ ISETP.NE.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fc40003f05270 */
/*0cc0*/ IADD3 R22, P3, R22, 0x20, RZ ; /* 0x0000002016167810 */
/* 0x000fe40007f7e0ff */
/*0cd0*/ IADD3 R9, R9, 0x4, RZ ; /* 0x0000000409097810 */
/* 0x000fc60007ffe0ff */
/*0ce0*/ IMAD.X R23, RZ, RZ, R23, P3 ; /* 0x000000ffff177224 */
/* 0x000fe200018e0617 */
/*0cf0*/ DFMA R16, R20, R18, R16 ; /* 0x000000121410722b */
/* 0x0082a40000000010 */
/*0d00*/ IADD3 R18, P4, R12, 0x20, RZ ; /* 0x000000200c127810 */
/* 0x002fc80007f9e0ff */
/*0d10*/ IADD3.X R19, RZ, R13, RZ, P4, !PT ; /* 0x0000000dff137210 */
/* 0x000fe200027fe4ff */
/*0d20*/ STG.E.64 [R14.64+0x18], R16 ; /* 0x000018100e007986 */
/* 0x0041e2000c101b04 */
/*0d30*/ IMAD.MOV.U32 R12, RZ, RZ, R18 ; /* 0x000000ffff0c7224 */
/* 0x000fc600078e0012 */
/*0d40*/ IMAD.MOV.U32 R13, RZ, RZ, R19 ; /* 0x000000ffff0d7224 */
/* 0x000fe200078e0013 */
/*0d50*/ @P0 BRA 0xb70 ; /* 0xfffffe1000000947 */
/* 0x001fea000383ffff */
/*0d60*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fda0003f05270 */
/*0d70*/ @!P0 BRA 0xef0 ; /* 0x0000017000008947 */
/* 0x000fea0003800000 */
/*0d80*/ IMAD.IADD R8, R6, 0x1, R9.reuse ; /* 0x0000000106087824 */
/* 0x100fe200078e0209 */
/*0d90*/ LDG.E.64 R14, [R10.64] ; /* 0x000000040a0e7981 */
/* 0x003ea2000c1e1b00 */
/*0da0*/ IMAD.IADD R6, R4, 0x1, R9 ; /* 0x0000000104067824 */
/* 0x000fe400078e0209 */
/*0db0*/ IMAD.WIDE R8, R8, R7, c[0x0][0x178] ; /* 0x00005e0008087625 */
/* 0x000fc800078e0207 */
/*0dc0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x180] ; /* 0x0000600006067625 */
/* 0x000fe200078e0207 */
/*0dd0*/ LDG.E.64 R12, [R8.64] ; /* 0x00000004080c7981 */
/* 0x000ea8000c1e1b00 */
/*0de0*/ LDG.E.64 R16, [R6.64] ; /* 0x0000000406107981 */
/* 0x000ea2000c1e1b00 */
/*0df0*/ ISETP.NE.AND P0, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fe20003f05270 */
/*0e00*/ DFMA R12, R12, R14, R16 ; /* 0x0000000e0c0c722b */
/* 0x004e0e0000000010 */
/*0e10*/ STG.E.64 [R6.64], R12 ; /* 0x0000000c06007986 */
/* 0x0011ea000c101b04 */
/*0e20*/ @!P0 BRA 0xef0 ; /* 0x000000c000008947 */
/* 0x000fea0003800000 */
/*0e30*/ LDG.E.64 R12, [R8.64+0x8] ; /* 0x00000804080c7981 */
/* 0x001ea8000c1e1b00 */
/*0e40*/ LDG.E.64 R14, [R10.64] ; /* 0x000000040a0e7981 */
/* 0x000ea8000c1e1b00 */
/*0e50*/ LDG.E.64 R16, [R6.64+0x8] ; /* 0x0000080406107981 */
/* 0x000ea2000c1e1b00 */
/*0e60*/ ISETP.NE.AND P0, PT, R0, 0x2, PT ; /* 0x000000020000780c */
/* 0x000fe20003f05270 */
/*0e70*/ DFMA R12, R12, R14, R16 ; /* 0x0000000e0c0c722b */
/* 0x004e0e0000000010 */
/*0e80*/ STG.E.64 [R6.64+0x8], R12 ; /* 0x0000080c06007986 */
/* 0x0011ea000c101b04 */
/*0e90*/ @!P0 BRA 0xef0 ; /* 0x0000005000008947 */
/* 0x000fea0003800000 */
/*0ea0*/ LDG.E.64 R10, [R10.64] ; /* 0x000000040a0a7981 */
/* 0x000ea8000c1e1b00 */
/*0eb0*/ LDG.E.64 R8, [R8.64+0x10] ; /* 0x0000100408087981 */
/* 0x000ea8000c1e1b00 */
/*0ec0*/ LDG.E.64 R12, [R6.64+0x10] ; /* 0x00001004060c7981 */
/* 0x001ea4000c1e1b00 */
/*0ed0*/ DFMA R12, R10, R8, R12 ; /* 0x000000080a0c722b */
/* 0x004e0e000000000c */
/*0ee0*/ STG.E.64 [R6.64+0x10], R12 ; /* 0x0000100c06007986 */
/* 0x0011e8000c101b04 */
/*0ef0*/ @!P2 BRA 0x130 ; /* 0xfffff2300000a947 */
/* 0x000fea000383ffff */
/*0f00*/ IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102027810 */
/* 0x000fc80007ffe0ff */
/*0f10*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x160], PT ; /* 0x0000580002007a0c */
/* 0x000fda0003f06270 */
/*0f20*/ @P0 CALL.REL.NOINC 0xf40 ; /* 0x0000001000000944 */
/* 0x000fe20003c00000 */
/*0f30*/ BRA 0xe0 ; /* 0xfffff1a000007947 */
/* 0x000fea000383ffff */
/*0f40*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0f50*/ BRA 0xf50; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0f60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0f70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0f80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0f90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0fa0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0fb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0fc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0fd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0fe0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ff0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | // Write a first sequential implementation (matmult gpu1()) of matrix multiplication on the
// GPU that uses only a single thread. It should work for all matrix sizes. Hints:
// – You need CUDA code to allocate memory on the GPU, transfer A and B to the
// GPU, transfer C back to the CPU, and free the allocated memory.
//
// Time your kernel for small matrix sizes and compare to the reference DGEMM on the CPU.
// matrix times matrix
// m represents the number of rows (the vertical length) of A and C,
// k represents the number of columns of A and the n. of rows of B,
// n represents the number of columns (the horizontal length) of B and C.
// ____k____ ____n____ ____n____
// | | | | | |
// m | A | X k | B | = m | C |
// | | | | | |
// --------- --------- ---------
__global__ void m1(int m, int n, int k, double *A, double *B, double *C) {
// for (int i = 0; i < m; i++) {
// for (int j = 0; j < n; j++) {
// C[i*n + j] = 0;
// }
// }
for (int i = 0; i < m; i++) {
for (int h = 0; h < k; h++){
for (int j = 0; j < n; j++) {
C[i*n + j] += A[i*k + h] * B[h*n + j];
}
}
}
}
extern "C" {
void matmult_gpu1(int m, int n, int k, double *A, double *B, double *C) {
double* d_A, * d_B, * d_C;
cudaSetDevice(2);
cudaMalloc((void**)&d_A, m*k * sizeof(double));
cudaMalloc((void**)&d_B, k*n * sizeof(double));
cudaMalloc((void**)&d_C, m*n * sizeof(double));
cudaMemcpy(d_A, A, m*k * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, k*n * sizeof(double), cudaMemcpyHostToDevice);
// Initialize the output matrix with zeroes.
cudaMemset(d_C, 0, m*n * sizeof(double));
m1<<<1,1>>>(m, n, k, d_A, d_B, d_C);
cudaDeviceSynchronize();
cudaMemcpy(C, d_C, m*n * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
}
} | .file "tmpxft_000b0dc7_00000000-6_matmult_gpu1.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z28__device_stub__Z2m1iiiPdS_S_iiiPdS_S_
.type _Z28__device_stub__Z2m1iiiPdS_S_iiiPdS_S_, @function
_Z28__device_stub__Z2m1iiiPdS_S_iiiPdS_S_:
.LFB2052:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movl %edi, 44(%rsp)
movl %esi, 40(%rsp)
movl %edx, 36(%rsp)
movq %rcx, 24(%rsp)
movq %r8, 16(%rsp)
movq %r9, 8(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 44(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rax
movq %rax, 120(%rsp)
leaq 36(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z2m1iiiPdS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z28__device_stub__Z2m1iiiPdS_S_iiiPdS_S_, .-_Z28__device_stub__Z2m1iiiPdS_S_iiiPdS_S_
.globl _Z2m1iiiPdS_S_
.type _Z2m1iiiPdS_S_, @function
_Z2m1iiiPdS_S_:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z28__device_stub__Z2m1iiiPdS_S_iiiPdS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z2m1iiiPdS_S_, .-_Z2m1iiiPdS_S_
.globl matmult_gpu1
.type matmult_gpu1, @function
matmult_gpu1:
.LFB2027:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $104, %rsp
.cfi_def_cfa_offset 160
movl %edi, %r13d
movl %esi, %r14d
movl %edx, %r15d
movq %rcx, 8(%rsp)
movq %r8, 16(%rsp)
movq %r9, 24(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
movl $2, %edi
call cudaSetDevice@PLT
movl %r13d, %r12d
imull %r15d, %r12d
movslq %r12d, %r12
salq $3, %r12
leaq 40(%rsp), %rdi
movq %r12, %rsi
call cudaMalloc@PLT
movl %r15d, %ebp
imull %r14d, %ebp
movslq %ebp, %rbp
salq $3, %rbp
leaq 48(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
movl %r13d, %ebx
imull %r14d, %ebx
movslq %ebx, %rbx
salq $3, %rbx
leaq 56(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %r12, %rdx
movq 8(%rsp), %rsi
movq 40(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %rbp, %rdx
movq 16(%rsp), %rsi
movq 48(%rsp), %rdi
call cudaMemcpy@PLT
movq %rbx, %rdx
movl $0, %esi
movq 56(%rsp), %rdi
call cudaMemset@PLT
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 76(%rsp), %rdx
movl $1, %ecx
movq 64(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
call cudaDeviceSynchronize@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 56(%rsp), %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
movq 56(%rsp), %r9
movq 48(%rsp), %r8
movq 40(%rsp), %rcx
movl %r15d, %edx
movl %r14d, %esi
movl %r13d, %edi
call _Z28__device_stub__Z2m1iiiPdS_S_iiiPdS_S_
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2027:
.size matmult_gpu1, .-matmult_gpu1
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z2m1iiiPdS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z2m1iiiPdS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | // Write a first sequential implementation (matmult gpu1()) of matrix multiplication on the
// GPU that uses only a single thread. It should work for all matrix sizes. Hints:
// – You need CUDA code to allocate memory on the GPU, transfer A and B to the
// GPU, transfer C back to the CPU, and free the allocated memory.
//
// Time your kernel for small matrix sizes and compare to the reference DGEMM on the CPU.
// matrix times matrix
// m represents the number of rows (the vertical length) of A and C,
// k represents the number of columns of A and the n. of rows of B,
// n represents the number of columns (the horizontal length) of B and C.
// ____k____ ____n____ ____n____
// | | | | | |
// m | A | X k | B | = m | C |
// | | | | | |
// --------- --------- ---------
__global__ void m1(int m, int n, int k, double *A, double *B, double *C) {
// for (int i = 0; i < m; i++) {
// for (int j = 0; j < n; j++) {
// C[i*n + j] = 0;
// }
// }
for (int i = 0; i < m; i++) {
for (int h = 0; h < k; h++){
for (int j = 0; j < n; j++) {
C[i*n + j] += A[i*k + h] * B[h*n + j];
}
}
}
}
extern "C" {
void matmult_gpu1(int m, int n, int k, double *A, double *B, double *C) {
double* d_A, * d_B, * d_C;
cudaSetDevice(2);
cudaMalloc((void**)&d_A, m*k * sizeof(double));
cudaMalloc((void**)&d_B, k*n * sizeof(double));
cudaMalloc((void**)&d_C, m*n * sizeof(double));
cudaMemcpy(d_A, A, m*k * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, k*n * sizeof(double), cudaMemcpyHostToDevice);
// Initialize the output matrix with zeroes.
cudaMemset(d_C, 0, m*n * sizeof(double));
m1<<<1,1>>>(m, n, k, d_A, d_B, d_C);
cudaDeviceSynchronize();
cudaMemcpy(C, d_C, m*n * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
}
} | #include <hip/hip_runtime.h>
// Write a first sequential implementation (matmult gpu1()) of matrix multiplication on the
// GPU that uses only a single thread. It should work for all matrix sizes. Hints:
// – You need CUDA code to allocate memory on the GPU, transfer A and B to the
// GPU, transfer C back to the CPU, and free the allocated memory.
//
// Time your kernel for small matrix sizes and compare to the reference DGEMM on the CPU.
// matrix times matrix
// m represents the number of rows (the vertical length) of A and C,
// k represents the number of columns of A and the n. of rows of B,
// n represents the number of columns (the horizontal length) of B and C.
// ____k____ ____n____ ____n____
// | | | | | |
// m | A | X k | B | = m | C |
// | | | | | |
// --------- --------- ---------
__global__ void m1(int m, int n, int k, double *A, double *B, double *C) {
// for (int i = 0; i < m; i++) {
// for (int j = 0; j < n; j++) {
// C[i*n + j] = 0;
// }
// }
for (int i = 0; i < m; i++) {
for (int h = 0; h < k; h++){
for (int j = 0; j < n; j++) {
C[i*n + j] += A[i*k + h] * B[h*n + j];
}
}
}
}
extern "C" {
void matmult_gpu1(int m, int n, int k, double *A, double *B, double *C) {
double* d_A, * d_B, * d_C;
hipSetDevice(2);
hipMalloc((void**)&d_A, m*k * sizeof(double));
hipMalloc((void**)&d_B, k*n * sizeof(double));
hipMalloc((void**)&d_C, m*n * sizeof(double));
hipMemcpy(d_A, A, m*k * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_B, B, k*n * sizeof(double), hipMemcpyHostToDevice);
// Initialize the output matrix with zeroes.
hipMemset(d_C, 0, m*n * sizeof(double));
m1<<<1,1>>>(m, n, k, d_A, d_B, d_C);
hipDeviceSynchronize();
hipMemcpy(C, d_C, m*n * sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_A); hipFree(d_B); hipFree(d_C);
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
// Write a first sequential implementation (matmult gpu1()) of matrix multiplication on the
// GPU that uses only a single thread. It should work for all matrix sizes. Hints:
// – You need CUDA code to allocate memory on the GPU, transfer A and B to the
// GPU, transfer C back to the CPU, and free the allocated memory.
//
// Time your kernel for small matrix sizes and compare to the reference DGEMM on the CPU.
// matrix times matrix
// m represents the number of rows (the vertical length) of A and C,
// k represents the number of columns of A and the n. of rows of B,
// n represents the number of columns (the horizontal length) of B and C.
// ____k____ ____n____ ____n____
// | | | | | |
// m | A | X k | B | = m | C |
// | | | | | |
// --------- --------- ---------
__global__ void m1(int m, int n, int k, double *A, double *B, double *C) {
// for (int i = 0; i < m; i++) {
// for (int j = 0; j < n; j++) {
// C[i*n + j] = 0;
// }
// }
for (int i = 0; i < m; i++) {
for (int h = 0; h < k; h++){
for (int j = 0; j < n; j++) {
C[i*n + j] += A[i*k + h] * B[h*n + j];
}
}
}
}
extern "C" {
void matmult_gpu1(int m, int n, int k, double *A, double *B, double *C) {
double* d_A, * d_B, * d_C;
hipSetDevice(2);
hipMalloc((void**)&d_A, m*k * sizeof(double));
hipMalloc((void**)&d_B, k*n * sizeof(double));
hipMalloc((void**)&d_C, m*n * sizeof(double));
hipMemcpy(d_A, A, m*k * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_B, B, k*n * sizeof(double), hipMemcpyHostToDevice);
// Initialize the output matrix with zeroes.
hipMemset(d_C, 0, m*n * sizeof(double));
m1<<<1,1>>>(m, n, k, d_A, d_B, d_C);
hipDeviceSynchronize();
hipMemcpy(C, d_C, m*n * sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_A); hipFree(d_B); hipFree(d_C);
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z2m1iiiPdS_S_
.globl _Z2m1iiiPdS_S_
.p2align 8
.type _Z2m1iiiPdS_S_,@function
_Z2m1iiiPdS_S_:
s_load_b32 s22, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s22, 1
s_cbranch_scc1 .LBB0_9
s_clause 0x2
s_load_b64 s[2:3], s[0:1], 0x4
s_load_b128 s[4:7], s[0:1], 0x10
s_load_b64 s[8:9], s[0:1], 0x20
s_mov_b32 s11, 0
s_mov_b32 s23, 0
s_mov_b32 s12, s11
s_waitcnt lgkmcnt(0)
s_cmp_gt_i32 s3, 0
s_cselect_b32 s0, -1, 0
s_cmp_gt_i32 s2, 0
v_cndmask_b32_e64 v0, 0, 1, s0
s_cselect_b32 s0, -1, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cndmask_b32_e64 v1, 0, 1, s0
v_cmp_ne_u32_e64 s0, 1, v0
v_mov_b32_e32 v0, 0
s_delay_alu instid0(VALU_DEP_3)
v_cmp_ne_u32_e64 s1, 1, v1
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_3
.p2align 6
.LBB0_2:
s_add_i32 s23, s23, 1
s_add_i32 s12, s12, s2
s_cmp_eq_u32 s23, s22
s_cbranch_scc1 .LBB0_9
.LBB0_3:
s_and_b32 vcc_lo, exec_lo, s0
s_cbranch_vccnz .LBB0_2
s_mov_b32 s13, s11
s_mov_b32 s10, 0
s_lshl_b64 s[14:15], s[12:13], 3
s_mul_i32 s24, s23, s3
s_add_u32 s14, s8, s14
s_addc_u32 s15, s9, s15
s_mov_b32 s13, s10
s_branch .LBB0_6
.p2align 6
.LBB0_5:
s_add_i32 s13, s13, 1
s_add_i32 s10, s10, s2
s_cmp_eq_u32 s13, s3
s_cbranch_scc1 .LBB0_2
.LBB0_6:
s_and_b32 vcc_lo, exec_lo, s1
s_cbranch_vccnz .LBB0_5
s_lshl_b64 s[16:17], s[10:11], 3
s_mov_b32 s19, s11
s_add_u32 s16, s6, s16
s_addc_u32 s17, s7, s17
s_add_i32 s18, s13, s24
s_mov_b32 s25, s2
s_lshl_b64 s[20:21], s[18:19], 3
s_mov_b64 s[18:19], s[14:15]
s_add_u32 s20, s4, s20
s_addc_u32 s21, s5, s21
.p2align 6
.LBB0_8:
s_clause 0x2
global_load_b64 v[1:2], v0, s[20:21]
global_load_b64 v[3:4], v0, s[16:17]
global_load_b64 v[5:6], v0, s[18:19]
s_add_i32 s25, s25, -1
s_waitcnt vmcnt(0)
v_fma_f64 v[1:2], v[1:2], v[3:4], v[5:6]
global_store_b64 v0, v[1:2], s[18:19]
s_add_u32 s18, s18, 8
s_addc_u32 s19, s19, 0
s_add_u32 s16, s16, 8
s_addc_u32 s17, s17, 0
s_cmp_eq_u32 s25, 0
s_cbranch_scc0 .LBB0_8
s_branch .LBB0_5
.LBB0_9:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z2m1iiiPdS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 40
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 26
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z2m1iiiPdS_S_, .Lfunc_end0-_Z2m1iiiPdS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .offset: 8
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 40
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z2m1iiiPdS_S_
.private_segment_fixed_size: 0
.sgpr_count: 28
.sgpr_spill_count: 0
.symbol: _Z2m1iiiPdS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
// Write a first sequential implementation (matmult gpu1()) of matrix multiplication on the
// GPU that uses only a single thread. It should work for all matrix sizes. Hints:
// – You need CUDA code to allocate memory on the GPU, transfer A and B to the
// GPU, transfer C back to the CPU, and free the allocated memory.
//
// Time your kernel for small matrix sizes and compare to the reference DGEMM on the CPU.
// matrix times matrix
// m represents the number of rows (the vertical length) of A and C,
// k represents the number of columns of A and the n. of rows of B,
// n represents the number of columns (the horizontal length) of B and C.
// ____k____ ____n____ ____n____
// | | | | | |
// m | A | X k | B | = m | C |
// | | | | | |
// --------- --------- ---------
__global__ void m1(int m, int n, int k, double *A, double *B, double *C) {
// for (int i = 0; i < m; i++) {
// for (int j = 0; j < n; j++) {
// C[i*n + j] = 0;
// }
// }
for (int i = 0; i < m; i++) {
for (int h = 0; h < k; h++){
for (int j = 0; j < n; j++) {
C[i*n + j] += A[i*k + h] * B[h*n + j];
}
}
}
}
extern "C" {
void matmult_gpu1(int m, int n, int k, double *A, double *B, double *C) {
double* d_A, * d_B, * d_C;
hipSetDevice(2);
hipMalloc((void**)&d_A, m*k * sizeof(double));
hipMalloc((void**)&d_B, k*n * sizeof(double));
hipMalloc((void**)&d_C, m*n * sizeof(double));
hipMemcpy(d_A, A, m*k * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_B, B, k*n * sizeof(double), hipMemcpyHostToDevice);
// Initialize the output matrix with zeroes.
hipMemset(d_C, 0, m*n * sizeof(double));
m1<<<1,1>>>(m, n, k, d_A, d_B, d_C);
hipDeviceSynchronize();
hipMemcpy(C, d_C, m*n * sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_A); hipFree(d_B); hipFree(d_C);
}
} | .text
.file "matmult_gpu1.hip"
.globl _Z17__device_stub__m1iiiPdS_S_ # -- Begin function _Z17__device_stub__m1iiiPdS_S_
.p2align 4, 0x90
.type _Z17__device_stub__m1iiiPdS_S_,@function
_Z17__device_stub__m1iiiPdS_S_: # @_Z17__device_stub__m1iiiPdS_S_
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 20(%rsp)
movl %esi, 16(%rsp)
movl %edx, 12(%rsp)
movq %rcx, 88(%rsp)
movq %r8, 80(%rsp)
movq %r9, 72(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 88(%rsp), %rax
movq %rax, 120(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 72(%rsp), %rax
movq %rax, 136(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z2m1iiiPdS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z17__device_stub__m1iiiPdS_S_, .Lfunc_end0-_Z17__device_stub__m1iiiPdS_S_
.cfi_endproc
# -- End function
.globl matmult_gpu1 # -- Begin function matmult_gpu1
.p2align 4, 0x90
.type matmult_gpu1,@function
matmult_gpu1: # @matmult_gpu1
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $200, %rsp
.cfi_def_cfa_offset 256
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %r9, 64(%rsp) # 8-byte Spill
movq %r8, 56(%rsp) # 8-byte Spill
movq %rcx, 48(%rsp) # 8-byte Spill
movl %edx, %ebp
movl %esi, %r15d
movl %edi, %r12d
movl $2, %edi
callq hipSetDevice
movl %ebp, %eax
imull %r12d, %eax
movslq %eax, %r13
shlq $3, %r13
leaq 24(%rsp), %rdi
movq %r13, %rsi
callq hipMalloc
movl %ebp, %eax
imull %r15d, %eax
movslq %eax, %rbx
shlq $3, %rbx
leaq 16(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
movl %r15d, %eax
imull %r12d, %eax
movslq %eax, %r14
shlq $3, %r14
leaq 8(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq 24(%rsp), %rdi
movq 48(%rsp), %rsi # 8-byte Reload
movq %r13, %rdx
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
movq 56(%rsp), %rsi # 8-byte Reload
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
xorl %esi, %esi
movq %r14, %rdx
callq hipMemset
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movl %r12d, 44(%rsp)
movl %r15d, 40(%rsp)
movl %ebp, 36(%rsp)
movq %rax, 136(%rsp)
movq %rcx, 128(%rsp)
movq %rdx, 120(%rsp)
leaq 44(%rsp), %rax
movq %rax, 144(%rsp)
leaq 40(%rsp), %rax
movq %rax, 152(%rsp)
leaq 36(%rsp), %rax
movq %rax, 160(%rsp)
leaq 136(%rsp), %rax
movq %rax, 168(%rsp)
leaq 128(%rsp), %rax
movq %rax, 176(%rsp)
leaq 120(%rsp), %rax
movq %rax, 184(%rsp)
leaq 104(%rsp), %rdi
leaq 88(%rsp), %rsi
leaq 80(%rsp), %rdx
leaq 72(%rsp), %rcx
callq __hipPopCallConfiguration
movq 104(%rsp), %rsi
movl 112(%rsp), %edx
movq 88(%rsp), %rcx
movl 96(%rsp), %r8d
leaq 144(%rsp), %r9
movl $_Z2m1iiiPdS_S_, %edi
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
callq hipDeviceSynchronize
movq 8(%rsp), %rsi
movq 64(%rsp), %rdi # 8-byte Reload
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
addq $200, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size matmult_gpu1, .Lfunc_end1-matmult_gpu1
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z2m1iiiPdS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z2m1iiiPdS_S_,@object # @_Z2m1iiiPdS_S_
.section .rodata,"a",@progbits
.globl _Z2m1iiiPdS_S_
.p2align 3, 0x0
_Z2m1iiiPdS_S_:
.quad _Z17__device_stub__m1iiiPdS_S_
.size _Z2m1iiiPdS_S_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z2m1iiiPdS_S_"
.size .L__unnamed_1, 15
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z17__device_stub__m1iiiPdS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z2m1iiiPdS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z2m1iiiPdS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ IMAD.MOV.U32 R0, RZ, RZ, 0x1 ; /* 0x00000001ff007424 */
/* 0x000fca00078e00ff */
/*0020*/ ISETP.LE.AND P0, PT, R0, c[0x0][0x160], PT ; /* 0x0000580000007a0c */
/* 0x000fda0003f03270 */
/*0030*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0040*/ ISETP.LE.AND P0, PT, R0, c[0x0][0x168], PT ; /* 0x00005a0000007a0c */
/* 0x000fda0003f03270 */
/*0050*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0060*/ IMAD.MOV.U32 R0, RZ, RZ, 0x1 ; /* 0x00000001ff007424 */
/* 0x000fe200078e00ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0080*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff027624 */
/* 0x000fc600078e00ff */
/*0090*/ IADD3 R0, -R0, c[0x0][0x164], RZ ; /* 0x0000590000007a10 */
/* 0x000fc80007ffe1ff */
/*00a0*/ ISETP.GE.U32.AND P1, PT, R0, 0x3, PT ; /* 0x000000030000780c */
/* 0x000fe40003f26070 */
/*00b0*/ LOP3.LUT R0, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302007812 */
/* 0x000fe200078ec0ff */
/*00c0*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */
/* 0x000fc600078e00ff */
/*00d0*/ IADD3 R3, -R0, c[0x0][0x164], RZ ; /* 0x0000590000037a10 */
/* 0x000fe40007ffe1ff */
/*00e0*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff047624 */
/* 0x000fca00078e00ff */
/*00f0*/ ISETP.GE.AND P0, PT, R4, 0x1, PT ; /* 0x000000010400780c */
/* 0x000fda0003f06270 */
/*0100*/ @!P0 BRA 0xf00 ; /* 0x00000df000008947 */
/* 0x003fea0003800000 */
/*0110*/ IMAD R4, R2, c[0x0][0x164], RZ ; /* 0x0000590002047a24 */
/* 0x000fe400078e02ff */
/*0120*/ IMAD.MOV.U32 R5, RZ, RZ, RZ ; /* 0x000000ffff057224 */
/* 0x000fc800078e00ff */
/*0130*/ IMAD R10, R2, c[0x0][0x168], R5 ; /* 0x00005a00020a7a24 */
/* 0x000fe400078e0205 */
/*0140*/ IMAD R6, R5.reuse, c[0x0][0x164], RZ ; /* 0x0000590005067a24 */
/* 0x041fe200078e02ff */
/*0150*/ IADD3 R5, R5, 0x1, RZ ; /* 0x0000000105057810 */
/* 0x000fe20007ffe0ff */
/*0160*/ IMAD.MOV.U32 R7, RZ, RZ, 0x8 ; /* 0x00000008ff077424 */
/* 0x000fe400078e00ff */
/*0170*/ IMAD.MOV.U32 R9, RZ, RZ, RZ ; /* 0x000000ffff097224 */
/* 0x000fe200078e00ff */
/*0180*/ ISETP.GE.AND P2, PT, R5, c[0x0][0x168], PT ; /* 0x00005a0005007a0c */
/* 0x000fe20003f46270 */
/*0190*/ IMAD.WIDE R10, R10, R7, c[0x0][0x170] ; /* 0x00005c000a0a7625 */
/* 0x000fe200078e0207 */
/*01a0*/ @!P1 BRA 0xd60 ; /* 0x00000bb000009947 */
/* 0x002fea0003800000 */
/*01b0*/ ISETP.GT.AND P0, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fe20003f04270 */
/*01c0*/ IMAD.MOV.U32 R9, RZ, RZ, RZ ; /* 0x000000ffff097224 */
/* 0x000fc400078e00ff */
/*01d0*/ IMAD.MOV.U32 R22, RZ, RZ, c[0x0][0x180] ; /* 0x00006000ff167624 */
/* 0x000fe400078e00ff */
/*01e0*/ IMAD.MOV.U32 R23, RZ, RZ, c[0x0][0x184] ; /* 0x00006100ff177624 */
/* 0x000fe400078e00ff */
/*01f0*/ IMAD.MOV.U32 R8, RZ, RZ, R3 ; /* 0x000000ffff087224 */
/* 0x000fe400078e0003 */
/*0200*/ IMAD.WIDE R12, R6, R7, c[0x0][0x178] ; /* 0x00005e00060c7625 */
/* 0x000fc800078e0207 */
/*0210*/ @!P0 BRA 0xb70 ; /* 0x0000095000008947 */
/* 0x000fea0003800000 */
/*0220*/ ISETP.GT.AND P3, PT, R8, 0xc, PT ; /* 0x0000000c0800780c */
/* 0x000fe40003f64270 */
/*0230*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*0240*/ @!P3 BRA 0x810 ; /* 0x000005c00000b947 */
/* 0x000fea0003800000 */
/*0250*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0260*/ IMAD.WIDE R20, R4, 0x8, R22 ; /* 0x0000000804147825 */
/* 0x001fe200078e0216 */
/*0270*/ LDG.E.64 R18, [R12.64] ; /* 0x000000040c127981 */
/* 0x000ea8000c1e1b00 */
/*0280*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000ea8000c1e1b00 */
/*0290*/ LDG.E.64 R14, [R20.64] ; /* 0x00000004140e7981 */
/* 0x000ea4000c1e1b00 */
/*02a0*/ DFMA R28, R18, R16, R14 ; /* 0x00000010121c722b */
/* 0x004044000000000e */
/*02b0*/ LDG.E.64 R14, [R20.64+0x8] ; /* 0x00000804140e7981 */
/* 0x001eaa000c1e1b00 */
/*02c0*/ STG.E.64 [R20.64], R28 ; /* 0x0000001c14007986 */
/* 0x0021e8000c101b04 */
/*02d0*/ LDG.E.64 R18, [R12.64+0x8] ; /* 0x000008040c127981 */
/* 0x000ea8000c1e1b00 */
/*02e0*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000ea4000c1e1b00 */
/*02f0*/ DFMA R26, R18, R16, R14 ; /* 0x00000010121a722b */
/* 0x004284000000000e */
/*0300*/ LDG.E.64 R14, [R20.64+0x10] ; /* 0x00001004140e7981 */
/* 0x002eea000c1e1b00 */
/*0310*/ STG.E.64 [R20.64+0x8], R26 ; /* 0x0000081a14007986 */
/* 0x0043e8000c101b04 */
/*0320*/ LDG.E.64 R18, [R12.64+0x10] ; /* 0x000010040c127981 */
/* 0x000ee8000c1e1b00 */
/*0330*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000ee4000c1e1b00 */
/*0340*/ DFMA R24, R18, R16, R14 ; /* 0x000000101218722b */
/* 0x0084c4000000000e */
/*0350*/ LDG.E.64 R14, [R20.64+0x18] ; /* 0x00001804140e7981 */
/* 0x004e2a000c1e1b00 */
/*0360*/ STG.E.64 [R20.64+0x10], R24 ; /* 0x0000101814007986 */
/* 0x0085e8000c101b04 */
/*0370*/ LDG.E.64 R18, [R12.64+0x18] ; /* 0x000018040c127981 */
/* 0x000e28000c1e1b00 */
/*0380*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000e24000c1e1b00 */
/*0390*/ DFMA R28, R18, R16, R14 ; /* 0x00000010121c722b */
/* 0x0010c4000000000e */
/*03a0*/ LDG.E.64 R14, [R20.64+0x20] ; /* 0x00002004140e7981 */
/* 0x001e6a000c1e1b00 */
/*03b0*/ STG.E.64 [R20.64+0x18], R28 ; /* 0x0000181c14007986 */
/* 0x0081e8000c101b04 */
/*03c0*/ LDG.E.64 R18, [R12.64+0x20] ; /* 0x000020040c127981 */
/* 0x000e68000c1e1b00 */
/*03d0*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000e64000c1e1b00 */
/*03e0*/ DFMA R26, R18, R16, R14 ; /* 0x00000010121a722b */
/* 0x0022c4000000000e */
/*03f0*/ LDG.E.64 R14, [R20.64+0x28] ; /* 0x00002804140e7981 */
/* 0x002eaa000c1e1b00 */
/*0400*/ STG.E.64 [R20.64+0x20], R26 ; /* 0x0000201a14007986 */
/* 0x0083e8000c101b04 */
/*0410*/ LDG.E.64 R18, [R12.64+0x28] ; /* 0x000028040c127981 */
/* 0x000ea8000c1e1b00 */
/*0420*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000ea4000c1e1b00 */
/*0430*/ DFMA R24, R18, R16, R14 ; /* 0x000000101218722b */
/* 0x0044c4000000000e */
/*0440*/ LDG.E.64 R14, [R20.64+0x30] ; /* 0x00003004140e7981 */
/* 0x004e2a000c1e1b00 */
/*0450*/ STG.E.64 [R20.64+0x28], R24 ; /* 0x0000281814007986 */
/* 0x0085e8000c101b04 */
/*0460*/ LDG.E.64 R18, [R12.64+0x30] ; /* 0x000030040c127981 */
/* 0x000e28000c1e1b00 */
/*0470*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000e24000c1e1b00 */
/*0480*/ DFMA R28, R18, R16, R14 ; /* 0x00000010121c722b */
/* 0x0010c4000000000e */
/*0490*/ LDG.E.64 R14, [R20.64+0x38] ; /* 0x00003804140e7981 */
/* 0x001e6a000c1e1b00 */
/*04a0*/ STG.E.64 [R20.64+0x30], R28 ; /* 0x0000301c14007986 */
/* 0x0081e8000c101b04 */
/*04b0*/ LDG.E.64 R18, [R12.64+0x38] ; /* 0x000038040c127981 */
/* 0x000e68000c1e1b00 */
/*04c0*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000e64000c1e1b00 */
/*04d0*/ DFMA R26, R18, R16, R14 ; /* 0x00000010121a722b */
/* 0x0022c4000000000e */
/*04e0*/ LDG.E.64 R14, [R20.64+0x40] ; /* 0x00004004140e7981 */
/* 0x002eaa000c1e1b00 */
/*04f0*/ STG.E.64 [R20.64+0x38], R26 ; /* 0x0000381a14007986 */
/* 0x0083e8000c101b04 */
/*0500*/ LDG.E.64 R18, [R12.64+0x40] ; /* 0x000040040c127981 */
/* 0x000ea8000c1e1b00 */
/*0510*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000ea4000c1e1b00 */
/*0520*/ DFMA R24, R18, R16, R14 ; /* 0x000000101218722b */
/* 0x0044c4000000000e */
/*0530*/ LDG.E.64 R14, [R20.64+0x48] ; /* 0x00004804140e7981 */
/* 0x004e2a000c1e1b00 */
/*0540*/ STG.E.64 [R20.64+0x40], R24 ; /* 0x0000401814007986 */
/* 0x0085e8000c101b04 */
/*0550*/ LDG.E.64 R18, [R12.64+0x48] ; /* 0x000048040c127981 */
/* 0x000e28000c1e1b00 */
/*0560*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000e24000c1e1b00 */
/*0570*/ DFMA R28, R18, R16, R14 ; /* 0x00000010121c722b */
/* 0x0010c4000000000e */
/*0580*/ LDG.E.64 R14, [R20.64+0x50] ; /* 0x00005004140e7981 */
/* 0x001e6a000c1e1b00 */
/*0590*/ STG.E.64 [R20.64+0x48], R28 ; /* 0x0000481c14007986 */
/* 0x0081e8000c101b04 */
/*05a0*/ LDG.E.64 R18, [R12.64+0x50] ; /* 0x000050040c127981 */
/* 0x000e68000c1e1b00 */
/*05b0*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000e64000c1e1b00 */
/*05c0*/ DFMA R26, R18, R16, R14 ; /* 0x00000010121a722b */
/* 0x0022c4000000000e */
/*05d0*/ LDG.E.64 R14, [R20.64+0x58] ; /* 0x00005804140e7981 */
/* 0x002eaa000c1e1b00 */
/*05e0*/ STG.E.64 [R20.64+0x50], R26 ; /* 0x0000501a14007986 */
/* 0x0083e8000c101b04 */
/*05f0*/ LDG.E.64 R18, [R12.64+0x58] ; /* 0x000058040c127981 */
/* 0x000ea8000c1e1b00 */
/*0600*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000ea4000c1e1b00 */
/*0610*/ DFMA R24, R18, R16, R14 ; /* 0x000000101218722b */
/* 0x0044c4000000000e */
/*0620*/ LDG.E.64 R14, [R20.64+0x60] ; /* 0x00006004140e7981 */
/* 0x004e2a000c1e1b00 */
/*0630*/ STG.E.64 [R20.64+0x58], R24 ; /* 0x0000581814007986 */
/* 0x0085e8000c101b04 */
/*0640*/ LDG.E.64 R18, [R12.64+0x60] ; /* 0x000060040c127981 */
/* 0x000e28000c1e1b00 */
/*0650*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000e24000c1e1b00 */
/*0660*/ DFMA R28, R18, R16, R14 ; /* 0x00000010121c722b */
/* 0x0010c4000000000e */
/*0670*/ LDG.E.64 R14, [R20.64+0x68] ; /* 0x00006804140e7981 */
/* 0x001e6a000c1e1b00 */
/*0680*/ STG.E.64 [R20.64+0x60], R28 ; /* 0x0000601c14007986 */
/* 0x0081e8000c101b04 */
/*0690*/ LDG.E.64 R18, [R12.64+0x68] ; /* 0x000068040c127981 */
/* 0x000e68000c1e1b00 */
/*06a0*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000e64000c1e1b00 */
/*06b0*/ DFMA R26, R18, R16, R14 ; /* 0x00000010121a722b */
/* 0x0022c4000000000e */
/*06c0*/ LDG.E.64 R14, [R20.64+0x70] ; /* 0x00007004140e7981 */
/* 0x002eaa000c1e1b00 */
/*06d0*/ STG.E.64 [R20.64+0x68], R26 ; /* 0x0000681a14007986 */
/* 0x0081e8000c101b04 */
/*06e0*/ LDG.E.64 R18, [R12.64+0x70] ; /* 0x000070040c127981 */
/* 0x000ea8000c1e1b00 */
/*06f0*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000ea2000c1e1b00 */
/*0700*/ IADD3 R8, R8, -0x10, RZ ; /* 0xfffffff008087810 */
/* 0x000fe20007ffe0ff */
/*0710*/ DFMA R24, R18, R16, R14 ; /* 0x000000101218722b */
/* 0x004284000000000e */
/*0720*/ LDG.E.64 R14, [R20.64+0x78] ; /* 0x00007804140e7981 */
/* 0x002eea000c1e1b00 */
/*0730*/ STG.E.64 [R20.64+0x70], R24 ; /* 0x0000701814007986 */
/* 0x0041e8000c101b04 */
/*0740*/ LDG.E.64 R18, [R12.64+0x78] ; /* 0x000078040c127981 */
/* 0x000ee8000c1e1b00 */
/*0750*/ LDG.E.64 R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x000ee2000c1e1b00 */
/*0760*/ ISETP.GT.AND P3, PT, R8, 0xc, PT ; /* 0x0000000c0800780c */
/* 0x000fc40003f64270 */
/*0770*/ IADD3 R22, P4, R22, 0x80, RZ ; /* 0x0000008016167810 */
/* 0x000fe40007f9e0ff */
/*0780*/ IADD3 R9, R9, 0x10, RZ ; /* 0x0000001009097810 */
/* 0x000fc60007ffe0ff */
/*0790*/ IMAD.X R23, RZ, RZ, R23, P4 ; /* 0x000000ffff177224 */
/* 0x000fe200020e0617 */
/*07a0*/ DFMA R14, R18, R16, R14 ; /* 0x00000010120e722b */
/* 0x0082a4000000000e */
/*07b0*/ IADD3 R16, P5, R12, 0x80, RZ ; /* 0x000000800c107810 */
/* 0x002fc80007fbe0ff */
/*07c0*/ IADD3.X R17, RZ, R13, RZ, P5, !PT ; /* 0x0000000dff117210 */
/* 0x000fe20002ffe4ff */
/*07d0*/ STG.E.64 [R20.64+0x78], R14 ; /* 0x0000780e14007986 */
/* 0x0041e2000c101b04 */
/*07e0*/ IMAD.MOV.U32 R12, RZ, RZ, R16 ; /* 0x000000ffff0c7224 */
/* 0x000fc600078e0010 */
/*07f0*/ IMAD.MOV.U32 R13, RZ, RZ, R17 ; /* 0x000000ffff0d7224 */
/* 0x000fe200078e0011 */
/*0800*/ @P3 BRA 0x260 ; /* 0xfffffa5000003947 */
/* 0x000fea000383ffff */
/*0810*/ ISETP.GT.AND P3, PT, R8, 0x4, PT ; /* 0x000000040800780c */
/* 0x000fda0003f64270 */
/*0820*/ @!P3 BRA 0xb50 ; /* 0x000003200000b947 */
/* 0x000fea0003800000 */
/*0830*/ IMAD.WIDE R14, R4, 0x8, R22 ; /* 0x00000008040e7825 */
/* 0x001fe200078e0216 */
/*0840*/ LDG.E.64 R20, [R12.64] ; /* 0x000000040c147981 */
/* 0x000ea8000c1e1b00 */
/*0850*/ LDG.E.64 R18, [R10.64] ; /* 0x000000040a127981 */
/* 0x000ea8000c1e1b00 */
/*0860*/ LDG.E.64 R16, [R14.64] ; /* 0x000000040e107981 */
/* 0x000ea4000c1e1b00 */
/*0870*/ DFMA R24, R20, R18, R16 ; /* 0x000000121418722b */
/* 0x0040440000000010 */
/*0880*/ LDG.E.64 R16, [R14.64+0x8] ; /* 0x000008040e107981 */
/* 0x001eaa000c1e1b00 */
/*0890*/ STG.E.64 [R14.64], R24 ; /* 0x000000180e007986 */
/* 0x0021e8000c101b04 */
/*08a0*/ LDG.E.64 R20, [R12.64+0x8] ; /* 0x000008040c147981 */
/* 0x000ea8000c1e1b00 */
/*08b0*/ LDG.E.64 R18, [R10.64] ; /* 0x000000040a127981 */
/* 0x000ea4000c1e1b00 */
/*08c0*/ DFMA R28, R20, R18, R16 ; /* 0x00000012141c722b */
/* 0x0042840000000010 */
/*08d0*/ LDG.E.64 R16, [R14.64+0x10] ; /* 0x000010040e107981 */
/* 0x002eea000c1e1b00 */
/*08e0*/ STG.E.64 [R14.64+0x8], R28 ; /* 0x0000081c0e007986 */
/* 0x0043e8000c101b04 */
/*08f0*/ LDG.E.64 R20, [R12.64+0x10] ; /* 0x000010040c147981 */
/* 0x000ee8000c1e1b00 */
/*0900*/ LDG.E.64 R18, [R10.64] ; /* 0x000000040a127981 */
/* 0x000ee4000c1e1b00 */
/*0910*/ DFMA R26, R20, R18, R16 ; /* 0x00000012141a722b */
/* 0x0084c40000000010 */
/*0920*/ LDG.E.64 R16, [R14.64+0x18] ; /* 0x000018040e107981 */
/* 0x004e2a000c1e1b00 */
/*0930*/ STG.E.64 [R14.64+0x10], R26 ; /* 0x0000101a0e007986 */
/* 0x0085e8000c101b04 */
/*0940*/ LDG.E.64 R20, [R12.64+0x18] ; /* 0x000018040c147981 */
/* 0x000e28000c1e1b00 */
/*0950*/ LDG.E.64 R18, [R10.64] ; /* 0x000000040a127981 */
/* 0x000e24000c1e1b00 */
/*0960*/ DFMA R24, R20, R18, R16 ; /* 0x000000121418722b */
/* 0x0010c40000000010 */
/*0970*/ LDG.E.64 R16, [R14.64+0x20] ; /* 0x000020040e107981 */
/* 0x001e6a000c1e1b00 */
/*0980*/ STG.E.64 [R14.64+0x18], R24 ; /* 0x000018180e007986 */
/* 0x0081e8000c101b04 */
/*0990*/ LDG.E.64 R20, [R12.64+0x20] ; /* 0x000020040c147981 */
/* 0x000e68000c1e1b00 */
/*09a0*/ LDG.E.64 R18, [R10.64] ; /* 0x000000040a127981 */
/* 0x000e64000c1e1b00 */
/*09b0*/ DFMA R28, R20, R18, R16 ; /* 0x00000012141c722b */
/* 0x0022c40000000010 */
/*09c0*/ LDG.E.64 R16, [R14.64+0x28] ; /* 0x000028040e107981 */
/* 0x002eaa000c1e1b00 */
/*09d0*/ STG.E.64 [R14.64+0x20], R28 ; /* 0x0000201c0e007986 */
/* 0x0083e8000c101b04 */
/*09e0*/ LDG.E.64 R20, [R12.64+0x28] ; /* 0x000028040c147981 */
/* 0x000ea8000c1e1b00 */
/*09f0*/ LDG.E.64 R18, [R10.64] ; /* 0x000000040a127981 */
/* 0x000ea4000c1e1b00 */
/*0a00*/ DFMA R26, R20, R18, R16 ; /* 0x00000012141a722b */
/* 0x0044c40000000010 */
/*0a10*/ LDG.E.64 R16, [R14.64+0x30] ; /* 0x000030040e107981 */
/* 0x004e2a000c1e1b00 */
/*0a20*/ STG.E.64 [R14.64+0x28], R26 ; /* 0x0000281a0e007986 */
/* 0x0083e8000c101b04 */
/*0a30*/ LDG.E.64 R18, [R12.64+0x30] ; /* 0x000030040c127981 */
/* 0x000e28000c1e1b00 */
/*0a40*/ LDG.E.64 R20, [R10.64] ; /* 0x000000040a147981 */
/* 0x000e24000c1e1b00 */
/*0a50*/ DFMA R24, R18, R20, R16 ; /* 0x000000141218722b */
/* 0x0010840000000010 */
/*0a60*/ LDG.E.64 R16, [R14.64+0x38] ; /* 0x000038040e107981 */
/* 0x001eea000c1e1b00 */
/*0a70*/ STG.E.64 [R14.64+0x30], R24 ; /* 0x000030180e007986 */
/* 0x0043e8000c101b04 */
/*0a80*/ LDG.E.64 R18, [R12.64+0x38] ; /* 0x000038040c127981 */
/* 0x000ee8000c1e1b00 */
/*0a90*/ LDG.E.64 R20, [R10.64] ; /* 0x000000040a147981 */
/* 0x000ee2000c1e1b00 */
/*0aa0*/ IADD3 R22, P3, R22, 0x40, RZ ; /* 0x0000004016167810 */
/* 0x000fc40007f7e0ff */
/*0ab0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0ac0*/ IADD3 R9, R9, 0x8, RZ ; /* 0x0000000809097810 */
/* 0x000fe20007ffe0ff */
/*0ad0*/ IMAD.X R23, RZ, RZ, R23, P3 ; /* 0x000000ffff177224 */
/* 0x000fe200018e0617 */
/*0ae0*/ IADD3 R8, R8, -0x8, RZ ; /* 0xfffffff808087810 */
/* 0x000fe20007ffe0ff */
/*0af0*/ DFMA R16, R18, R20, R16 ; /* 0x000000141210722b */
/* 0x0080a40000000010 */
/*0b00*/ IADD3 R18, P4, R12, 0x40, RZ ; /* 0x000000400c127810 */
/* 0x001fca0007f9e0ff */
/*0b10*/ STG.E.64 [R14.64+0x38], R16 ; /* 0x000038100e007986 */
/* 0x0043e2000c101b04 */
/*0b20*/ IMAD.X R19, RZ, RZ, R13, P4 ; /* 0x000000ffff137224 */
/* 0x000fe400020e060d */
/*0b30*/ IMAD.MOV.U32 R12, RZ, RZ, R18 ; /* 0x000000ffff0c7224 */
/* 0x000fe400078e0012 */
/*0b40*/ IMAD.MOV.U32 R13, RZ, RZ, R19 ; /* 0x000000ffff0d7224 */
/* 0x000fe400078e0013 */
/*0b50*/ ISETP.NE.OR P0, PT, R8, RZ, P0 ; /* 0x000000ff0800720c */
/* 0x000fda0000705670 */
/*0b60*/ @!P0 BRA 0xd60 ; /* 0x000001f000008947 */
/* 0x000fea0003800000 */
/*0b70*/ IMAD.WIDE R14, R4, 0x8, R22 ; /* 0x00000008040e7825 */
/* 0x003fe200078e0216 */
/*0b80*/ LDG.E.64 R20, [R12.64] ; /* 0x000000040c147981 */
/* 0x000ea8000c1e1b00 */
/*0b90*/ LDG.E.64 R18, [R10.64] ; /* 0x000000040a127981 */
/* 0x000ea8000c1e1b00 */
/*0ba0*/ LDG.E.64 R16, [R14.64] ; /* 0x000000040e107981 */
/* 0x000ea4000c1e1b00 */
/*0bb0*/ DFMA R28, R20, R18, R16 ; /* 0x00000012141c722b */
/* 0x0040440000000010 */
/*0bc0*/ LDG.E.64 R16, [R14.64+0x8] ; /* 0x000008040e107981 */
/* 0x001eaa000c1e1b00 */
/*0bd0*/ STG.E.64 [R14.64], R28 ; /* 0x0000001c0e007986 */
/* 0x0021e8000c101b04 */
/*0be0*/ LDG.E.64 R20, [R12.64+0x8] ; /* 0x000008040c147981 */
/* 0x000ea8000c1e1b00 */
/*0bf0*/ LDG.E.64 R18, [R10.64] ; /* 0x000000040a127981 */
/* 0x000ea4000c1e1b00 */
/*0c00*/ DFMA R26, R20, R18, R16 ; /* 0x00000012141a722b */
/* 0x0042840000000010 */
/*0c10*/ LDG.E.64 R16, [R14.64+0x10] ; /* 0x000010040e107981 */
/* 0x002eea000c1e1b00 */
/*0c20*/ STG.E.64 [R14.64+0x8], R26 ; /* 0x0000081a0e007986 */
/* 0x0041e8000c101b04 */
/*0c30*/ LDG.E.64 R20, [R12.64+0x10] ; /* 0x000010040c147981 */
/* 0x000ee8000c1e1b00 */
/*0c40*/ LDG.E.64 R18, [R10.64] ; /* 0x000000040a127981 */
/* 0x000ee2000c1e1b00 */
/*0c50*/ IADD3 R8, R8, -0x4, RZ ; /* 0xfffffffc08087810 */
/* 0x000fe20007ffe0ff */
/*0c60*/ DFMA R24, R20, R18, R16 ; /* 0x000000121418722b */
/* 0x0082840000000010 */
/*0c70*/ LDG.E.64 R16, [R14.64+0x18] ; /* 0x000018040e107981 */
/* 0x002eea000c1e1b00 */
/*0c80*/ STG.E.64 [R14.64+0x10], R24 ; /* 0x000010180e007986 */
/* 0x0041e8000c101b04 */
/*0c90*/ LDG.E.64 R20, [R12.64+0x18] ; /* 0x000018040c147981 */
/* 0x000ee8000c1e1b00 */
/*0ca0*/ LDG.E.64 R18, [R10.64] ; /* 0x000000040a127981 */
/* 0x000ee2000c1e1b00 */
/*0cb0*/ ISETP.NE.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fc40003f05270 */
/*0cc0*/ IADD3 R22, P3, R22, 0x20, RZ ; /* 0x0000002016167810 */
/* 0x000fe40007f7e0ff */
/*0cd0*/ IADD3 R9, R9, 0x4, RZ ; /* 0x0000000409097810 */
/* 0x000fc60007ffe0ff */
/*0ce0*/ IMAD.X R23, RZ, RZ, R23, P3 ; /* 0x000000ffff177224 */
/* 0x000fe200018e0617 */
/*0cf0*/ DFMA R16, R20, R18, R16 ; /* 0x000000121410722b */
/* 0x0082a40000000010 */
/*0d00*/ IADD3 R18, P4, R12, 0x20, RZ ; /* 0x000000200c127810 */
/* 0x002fc80007f9e0ff */
/*0d10*/ IADD3.X R19, RZ, R13, RZ, P4, !PT ; /* 0x0000000dff137210 */
/* 0x000fe200027fe4ff */
/*0d20*/ STG.E.64 [R14.64+0x18], R16 ; /* 0x000018100e007986 */
/* 0x0041e2000c101b04 */
/*0d30*/ IMAD.MOV.U32 R12, RZ, RZ, R18 ; /* 0x000000ffff0c7224 */
/* 0x000fc600078e0012 */
/*0d40*/ IMAD.MOV.U32 R13, RZ, RZ, R19 ; /* 0x000000ffff0d7224 */
/* 0x000fe200078e0013 */
/*0d50*/ @P0 BRA 0xb70 ; /* 0xfffffe1000000947 */
/* 0x001fea000383ffff */
/*0d60*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fda0003f05270 */
/*0d70*/ @!P0 BRA 0xef0 ; /* 0x0000017000008947 */
/* 0x000fea0003800000 */
/*0d80*/ IMAD.IADD R8, R6, 0x1, R9.reuse ; /* 0x0000000106087824 */
/* 0x100fe200078e0209 */
/*0d90*/ LDG.E.64 R14, [R10.64] ; /* 0x000000040a0e7981 */
/* 0x003ea2000c1e1b00 */
/*0da0*/ IMAD.IADD R6, R4, 0x1, R9 ; /* 0x0000000104067824 */
/* 0x000fe400078e0209 */
/*0db0*/ IMAD.WIDE R8, R8, R7, c[0x0][0x178] ; /* 0x00005e0008087625 */
/* 0x000fc800078e0207 */
/*0dc0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x180] ; /* 0x0000600006067625 */
/* 0x000fe200078e0207 */
/*0dd0*/ LDG.E.64 R12, [R8.64] ; /* 0x00000004080c7981 */
/* 0x000ea8000c1e1b00 */
/*0de0*/ LDG.E.64 R16, [R6.64] ; /* 0x0000000406107981 */
/* 0x000ea2000c1e1b00 */
/*0df0*/ ISETP.NE.AND P0, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fe20003f05270 */
/*0e00*/ DFMA R12, R12, R14, R16 ; /* 0x0000000e0c0c722b */
/* 0x004e0e0000000010 */
/*0e10*/ STG.E.64 [R6.64], R12 ; /* 0x0000000c06007986 */
/* 0x0011ea000c101b04 */
/*0e20*/ @!P0 BRA 0xef0 ; /* 0x000000c000008947 */
/* 0x000fea0003800000 */
/*0e30*/ LDG.E.64 R12, [R8.64+0x8] ; /* 0x00000804080c7981 */
/* 0x001ea8000c1e1b00 */
/*0e40*/ LDG.E.64 R14, [R10.64] ; /* 0x000000040a0e7981 */
/* 0x000ea8000c1e1b00 */
/*0e50*/ LDG.E.64 R16, [R6.64+0x8] ; /* 0x0000080406107981 */
/* 0x000ea2000c1e1b00 */
/*0e60*/ ISETP.NE.AND P0, PT, R0, 0x2, PT ; /* 0x000000020000780c */
/* 0x000fe20003f05270 */
/*0e70*/ DFMA R12, R12, R14, R16 ; /* 0x0000000e0c0c722b */
/* 0x004e0e0000000010 */
/*0e80*/ STG.E.64 [R6.64+0x8], R12 ; /* 0x0000080c06007986 */
/* 0x0011ea000c101b04 */
/*0e90*/ @!P0 BRA 0xef0 ; /* 0x0000005000008947 */
/* 0x000fea0003800000 */
/*0ea0*/ LDG.E.64 R10, [R10.64] ; /* 0x000000040a0a7981 */
/* 0x000ea8000c1e1b00 */
/*0eb0*/ LDG.E.64 R8, [R8.64+0x10] ; /* 0x0000100408087981 */
/* 0x000ea8000c1e1b00 */
/*0ec0*/ LDG.E.64 R12, [R6.64+0x10] ; /* 0x00001004060c7981 */
/* 0x001ea4000c1e1b00 */
/*0ed0*/ DFMA R12, R10, R8, R12 ; /* 0x000000080a0c722b */
/* 0x004e0e000000000c */
/*0ee0*/ STG.E.64 [R6.64+0x10], R12 ; /* 0x0000100c06007986 */
/* 0x0011e8000c101b04 */
/*0ef0*/ @!P2 BRA 0x130 ; /* 0xfffff2300000a947 */
/* 0x000fea000383ffff */
/*0f00*/ IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102027810 */
/* 0x000fc80007ffe0ff */
/*0f10*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x160], PT ; /* 0x0000580002007a0c */
/* 0x000fda0003f06270 */
/*0f20*/ @P0 CALL.REL.NOINC 0xf40 ; /* 0x0000001000000944 */
/* 0x000fe20003c00000 */
/*0f30*/ BRA 0xe0 ; /* 0xfffff1a000007947 */
/* 0x000fea000383ffff */
/*0f40*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0f50*/ BRA 0xf50; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0f60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0f70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0f80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0f90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0fa0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0fb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0fc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0fd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0fe0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ff0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z2m1iiiPdS_S_
.globl _Z2m1iiiPdS_S_
.p2align 8
.type _Z2m1iiiPdS_S_,@function
_Z2m1iiiPdS_S_:
s_load_b32 s22, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s22, 1
s_cbranch_scc1 .LBB0_9
s_clause 0x2
s_load_b64 s[2:3], s[0:1], 0x4
s_load_b128 s[4:7], s[0:1], 0x10
s_load_b64 s[8:9], s[0:1], 0x20
s_mov_b32 s11, 0
s_mov_b32 s23, 0
s_mov_b32 s12, s11
s_waitcnt lgkmcnt(0)
s_cmp_gt_i32 s3, 0
s_cselect_b32 s0, -1, 0
s_cmp_gt_i32 s2, 0
v_cndmask_b32_e64 v0, 0, 1, s0
s_cselect_b32 s0, -1, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cndmask_b32_e64 v1, 0, 1, s0
v_cmp_ne_u32_e64 s0, 1, v0
v_mov_b32_e32 v0, 0
s_delay_alu instid0(VALU_DEP_3)
v_cmp_ne_u32_e64 s1, 1, v1
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_3
.p2align 6
.LBB0_2:
s_add_i32 s23, s23, 1
s_add_i32 s12, s12, s2
s_cmp_eq_u32 s23, s22
s_cbranch_scc1 .LBB0_9
.LBB0_3:
s_and_b32 vcc_lo, exec_lo, s0
s_cbranch_vccnz .LBB0_2
s_mov_b32 s13, s11
s_mov_b32 s10, 0
s_lshl_b64 s[14:15], s[12:13], 3
s_mul_i32 s24, s23, s3
s_add_u32 s14, s8, s14
s_addc_u32 s15, s9, s15
s_mov_b32 s13, s10
s_branch .LBB0_6
.p2align 6
.LBB0_5:
s_add_i32 s13, s13, 1
s_add_i32 s10, s10, s2
s_cmp_eq_u32 s13, s3
s_cbranch_scc1 .LBB0_2
.LBB0_6:
s_and_b32 vcc_lo, exec_lo, s1
s_cbranch_vccnz .LBB0_5
s_lshl_b64 s[16:17], s[10:11], 3
s_mov_b32 s19, s11
s_add_u32 s16, s6, s16
s_addc_u32 s17, s7, s17
s_add_i32 s18, s13, s24
s_mov_b32 s25, s2
s_lshl_b64 s[20:21], s[18:19], 3
s_mov_b64 s[18:19], s[14:15]
s_add_u32 s20, s4, s20
s_addc_u32 s21, s5, s21
.p2align 6
.LBB0_8:
s_clause 0x2
global_load_b64 v[1:2], v0, s[20:21]
global_load_b64 v[3:4], v0, s[16:17]
global_load_b64 v[5:6], v0, s[18:19]
s_add_i32 s25, s25, -1
s_waitcnt vmcnt(0)
v_fma_f64 v[1:2], v[1:2], v[3:4], v[5:6]
global_store_b64 v0, v[1:2], s[18:19]
s_add_u32 s18, s18, 8
s_addc_u32 s19, s19, 0
s_add_u32 s16, s16, 8
s_addc_u32 s17, s17, 0
s_cmp_eq_u32 s25, 0
s_cbranch_scc0 .LBB0_8
s_branch .LBB0_5
.LBB0_9:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z2m1iiiPdS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 40
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 26
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z2m1iiiPdS_S_, .Lfunc_end0-_Z2m1iiiPdS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .offset: 8
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 40
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z2m1iiiPdS_S_
.private_segment_fixed_size: 0
.sgpr_count: 28
.sgpr_spill_count: 0
.symbol: _Z2m1iiiPdS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000b0dc7_00000000-6_matmult_gpu1.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z28__device_stub__Z2m1iiiPdS_S_iiiPdS_S_
.type _Z28__device_stub__Z2m1iiiPdS_S_iiiPdS_S_, @function
_Z28__device_stub__Z2m1iiiPdS_S_iiiPdS_S_:
.LFB2052:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movl %edi, 44(%rsp)
movl %esi, 40(%rsp)
movl %edx, 36(%rsp)
movq %rcx, 24(%rsp)
movq %r8, 16(%rsp)
movq %r9, 8(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 44(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rax
movq %rax, 120(%rsp)
leaq 36(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z2m1iiiPdS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z28__device_stub__Z2m1iiiPdS_S_iiiPdS_S_, .-_Z28__device_stub__Z2m1iiiPdS_S_iiiPdS_S_
.globl _Z2m1iiiPdS_S_
.type _Z2m1iiiPdS_S_, @function
_Z2m1iiiPdS_S_:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z28__device_stub__Z2m1iiiPdS_S_iiiPdS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z2m1iiiPdS_S_, .-_Z2m1iiiPdS_S_
.globl matmult_gpu1
.type matmult_gpu1, @function
matmult_gpu1:
.LFB2027:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $104, %rsp
.cfi_def_cfa_offset 160
movl %edi, %r13d
movl %esi, %r14d
movl %edx, %r15d
movq %rcx, 8(%rsp)
movq %r8, 16(%rsp)
movq %r9, 24(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
movl $2, %edi
call cudaSetDevice@PLT
movl %r13d, %r12d
imull %r15d, %r12d
movslq %r12d, %r12
salq $3, %r12
leaq 40(%rsp), %rdi
movq %r12, %rsi
call cudaMalloc@PLT
movl %r15d, %ebp
imull %r14d, %ebp
movslq %ebp, %rbp
salq $3, %rbp
leaq 48(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
movl %r13d, %ebx
imull %r14d, %ebx
movslq %ebx, %rbx
salq $3, %rbx
leaq 56(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %r12, %rdx
movq 8(%rsp), %rsi
movq 40(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %rbp, %rdx
movq 16(%rsp), %rsi
movq 48(%rsp), %rdi
call cudaMemcpy@PLT
movq %rbx, %rdx
movl $0, %esi
movq 56(%rsp), %rdi
call cudaMemset@PLT
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 76(%rsp), %rdx
movl $1, %ecx
movq 64(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
call cudaDeviceSynchronize@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 56(%rsp), %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
movq 56(%rsp), %r9
movq 48(%rsp), %r8
movq 40(%rsp), %rcx
movl %r15d, %edx
movl %r14d, %esi
movl %r13d, %edi
call _Z28__device_stub__Z2m1iiiPdS_S_iiiPdS_S_
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2027:
.size matmult_gpu1, .-matmult_gpu1
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z2m1iiiPdS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z2m1iiiPdS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "matmult_gpu1.hip"
.globl _Z17__device_stub__m1iiiPdS_S_ # -- Begin function _Z17__device_stub__m1iiiPdS_S_
.p2align 4, 0x90
.type _Z17__device_stub__m1iiiPdS_S_,@function
_Z17__device_stub__m1iiiPdS_S_: # @_Z17__device_stub__m1iiiPdS_S_
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 20(%rsp)
movl %esi, 16(%rsp)
movl %edx, 12(%rsp)
movq %rcx, 88(%rsp)
movq %r8, 80(%rsp)
movq %r9, 72(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 88(%rsp), %rax
movq %rax, 120(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 72(%rsp), %rax
movq %rax, 136(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z2m1iiiPdS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z17__device_stub__m1iiiPdS_S_, .Lfunc_end0-_Z17__device_stub__m1iiiPdS_S_
.cfi_endproc
# -- End function
.globl matmult_gpu1 # -- Begin function matmult_gpu1
.p2align 4, 0x90
.type matmult_gpu1,@function
matmult_gpu1: # @matmult_gpu1
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $200, %rsp
.cfi_def_cfa_offset 256
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %r9, 64(%rsp) # 8-byte Spill
movq %r8, 56(%rsp) # 8-byte Spill
movq %rcx, 48(%rsp) # 8-byte Spill
movl %edx, %ebp
movl %esi, %r15d
movl %edi, %r12d
movl $2, %edi
callq hipSetDevice
movl %ebp, %eax
imull %r12d, %eax
movslq %eax, %r13
shlq $3, %r13
leaq 24(%rsp), %rdi
movq %r13, %rsi
callq hipMalloc
movl %ebp, %eax
imull %r15d, %eax
movslq %eax, %rbx
shlq $3, %rbx
leaq 16(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
movl %r15d, %eax
imull %r12d, %eax
movslq %eax, %r14
shlq $3, %r14
leaq 8(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq 24(%rsp), %rdi
movq 48(%rsp), %rsi # 8-byte Reload
movq %r13, %rdx
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
movq 56(%rsp), %rsi # 8-byte Reload
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
xorl %esi, %esi
movq %r14, %rdx
callq hipMemset
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movl %r12d, 44(%rsp)
movl %r15d, 40(%rsp)
movl %ebp, 36(%rsp)
movq %rax, 136(%rsp)
movq %rcx, 128(%rsp)
movq %rdx, 120(%rsp)
leaq 44(%rsp), %rax
movq %rax, 144(%rsp)
leaq 40(%rsp), %rax
movq %rax, 152(%rsp)
leaq 36(%rsp), %rax
movq %rax, 160(%rsp)
leaq 136(%rsp), %rax
movq %rax, 168(%rsp)
leaq 128(%rsp), %rax
movq %rax, 176(%rsp)
leaq 120(%rsp), %rax
movq %rax, 184(%rsp)
leaq 104(%rsp), %rdi
leaq 88(%rsp), %rsi
leaq 80(%rsp), %rdx
leaq 72(%rsp), %rcx
callq __hipPopCallConfiguration
movq 104(%rsp), %rsi
movl 112(%rsp), %edx
movq 88(%rsp), %rcx
movl 96(%rsp), %r8d
leaq 144(%rsp), %r9
movl $_Z2m1iiiPdS_S_, %edi
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
callq hipDeviceSynchronize
movq 8(%rsp), %rsi
movq 64(%rsp), %rdi # 8-byte Reload
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
addq $200, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size matmult_gpu1, .Lfunc_end1-matmult_gpu1
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z2m1iiiPdS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z2m1iiiPdS_S_,@object # @_Z2m1iiiPdS_S_
.section .rodata,"a",@progbits
.globl _Z2m1iiiPdS_S_
.p2align 3, 0x0
_Z2m1iiiPdS_S_:
.quad _Z17__device_stub__m1iiiPdS_S_
.size _Z2m1iiiPdS_S_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z2m1iiiPdS_S_"
.size .L__unnamed_1, 15
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z17__device_stub__m1iiiPdS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z2m1iiiPdS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | __global__ void inout(short int * out, short int * in, short Res)
{
const int i = threadIdx.x;
out[i] = in[i] * Res/1000000;
} | code for sm_80
Function : _Z5inoutPsS_s
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 1.1920928955078125e-07 ; /* 0x00000002ff057435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x001fcc00078e0205 */
/*0050*/ LDG.E.U16 R2, [R2.64] ; /* 0x0000000602027981 */
/* 0x000ea2000c1e1500 */
/*0060*/ ULDC.U16 UR4, c[0x0][0x170] ; /* 0x00005c0000047ab9 */
/* 0x000fe20000000400 */
/*0070*/ IMAD.WIDE R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fe200078e0205 */
/*0080*/ UPRMT UR4, UR4, 0x9910, URZ ; /* 0x0000991004047896 */
/* 0x000fe2000800003f */
/*0090*/ PRMT R0, R2, 0x9910, RZ ; /* 0x0000991002007816 */
/* 0x004fca00000000ff */
/*00a0*/ IMAD R0, R0, UR4, RZ ; /* 0x0000000400007c24 */
/* 0x000fc8000f8e02ff */
/*00b0*/ IMAD.HI R0, R0, 0x431bde83, RZ ; /* 0x431bde8300007827 */
/* 0x000fca00078e02ff */
/*00c0*/ SHF.R.U32.HI R7, RZ, 0x1f, R0 ; /* 0x0000001fff077819 */
/* 0x000fc80000011600 */
/*00d0*/ LEA.HI.SX32 R7, R0, R7, 0xe ; /* 0x0000000700077211 */
/* 0x000fca00078f72ff */
/*00e0*/ STG.E.U16 [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101506 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | __global__ void inout(short int * out, short int * in, short Res)
{
const int i = threadIdx.x;
out[i] = in[i] * Res/1000000;
} | .file "tmpxft_00026e87_00000000-6_test.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z27__device_stub__Z5inoutPsS_sPsS_s
.type _Z27__device_stub__Z5inoutPsS_sPsS_s, @function
_Z27__device_stub__Z5inoutPsS_sPsS_s:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movw %dx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z5inoutPsS_s(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z27__device_stub__Z5inoutPsS_sPsS_s, .-_Z27__device_stub__Z5inoutPsS_sPsS_s
.globl _Z5inoutPsS_s
.type _Z5inoutPsS_s, @function
_Z5inoutPsS_s:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movswl %dx, %edx
call _Z27__device_stub__Z5inoutPsS_sPsS_s
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z5inoutPsS_s, .-_Z5inoutPsS_s
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z5inoutPsS_s"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z5inoutPsS_s(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | __global__ void inout(short int * out, short int * in, short Res)
{
const int i = threadIdx.x;
out[i] = in[i] * Res/1000000;
} | #include <hip/hip_runtime.h>
__global__ void inout(short int * out, short int * in, short Res)
{
const int i = threadIdx.x;
out[i] = in[i] * Res/1000000;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
__global__ void inout(short int * out, short int * in, short Res)
{
const int i = threadIdx.x;
out[i] = in[i] * Res/1000000;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z5inoutPsS_s
.globl _Z5inoutPsS_s
.p2align 8
.type _Z5inoutPsS_s,@function
_Z5inoutPsS_s:
s_load_b128 s[4:7], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 1, v0
s_load_b32 s0, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
global_load_i16 v1, v0, s[6:7]
s_sext_i32_i16 s0, s0
s_waitcnt vmcnt(0)
v_mul_i32_i24_e32 v1, s0, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_i32 v1, v1, 0x431bde83
v_lshrrev_b32_e32 v2, 31, v1
v_ashrrev_i32_e32 v1, 18, v1
s_delay_alu instid0(VALU_DEP_1)
v_add_nc_u32_e32 v1, v1, v2
global_store_b16 v0, v1, s[4:5]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z5inoutPsS_s
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 20
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z5inoutPsS_s, .Lfunc_end0-_Z5inoutPsS_s
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 2
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 20
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z5inoutPsS_s
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z5inoutPsS_s.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
__global__ void inout(short int * out, short int * in, short Res)
{
const int i = threadIdx.x;
out[i] = in[i] * Res/1000000;
} | .text
.file "test.hip"
.globl _Z20__device_stub__inoutPsS_s # -- Begin function _Z20__device_stub__inoutPsS_s
.p2align 4, 0x90
.type _Z20__device_stub__inoutPsS_s,@function
_Z20__device_stub__inoutPsS_s: # @_Z20__device_stub__inoutPsS_s
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movw %dx, 14(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 14(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z5inoutPsS_s, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z20__device_stub__inoutPsS_s, .Lfunc_end0-_Z20__device_stub__inoutPsS_s
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z5inoutPsS_s, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z5inoutPsS_s,@object # @_Z5inoutPsS_s
.section .rodata,"a",@progbits
.globl _Z5inoutPsS_s
.p2align 3, 0x0
_Z5inoutPsS_s:
.quad _Z20__device_stub__inoutPsS_s
.size _Z5inoutPsS_s, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z5inoutPsS_s"
.size .L__unnamed_1, 14
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z20__device_stub__inoutPsS_s
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z5inoutPsS_s
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z5inoutPsS_s
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 1.1920928955078125e-07 ; /* 0x00000002ff057435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x001fcc00078e0205 */
/*0050*/ LDG.E.U16 R2, [R2.64] ; /* 0x0000000602027981 */
/* 0x000ea2000c1e1500 */
/*0060*/ ULDC.U16 UR4, c[0x0][0x170] ; /* 0x00005c0000047ab9 */
/* 0x000fe20000000400 */
/*0070*/ IMAD.WIDE R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fe200078e0205 */
/*0080*/ UPRMT UR4, UR4, 0x9910, URZ ; /* 0x0000991004047896 */
/* 0x000fe2000800003f */
/*0090*/ PRMT R0, R2, 0x9910, RZ ; /* 0x0000991002007816 */
/* 0x004fca00000000ff */
/*00a0*/ IMAD R0, R0, UR4, RZ ; /* 0x0000000400007c24 */
/* 0x000fc8000f8e02ff */
/*00b0*/ IMAD.HI R0, R0, 0x431bde83, RZ ; /* 0x431bde8300007827 */
/* 0x000fca00078e02ff */
/*00c0*/ SHF.R.U32.HI R7, RZ, 0x1f, R0 ; /* 0x0000001fff077819 */
/* 0x000fc80000011600 */
/*00d0*/ LEA.HI.SX32 R7, R0, R7, 0xe ; /* 0x0000000700077211 */
/* 0x000fca00078f72ff */
/*00e0*/ STG.E.U16 [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101506 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z5inoutPsS_s
.globl _Z5inoutPsS_s
.p2align 8
.type _Z5inoutPsS_s,@function
_Z5inoutPsS_s:
s_load_b128 s[4:7], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 1, v0
s_load_b32 s0, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
global_load_i16 v1, v0, s[6:7]
s_sext_i32_i16 s0, s0
s_waitcnt vmcnt(0)
v_mul_i32_i24_e32 v1, s0, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_i32 v1, v1, 0x431bde83
v_lshrrev_b32_e32 v2, 31, v1
v_ashrrev_i32_e32 v1, 18, v1
s_delay_alu instid0(VALU_DEP_1)
v_add_nc_u32_e32 v1, v1, v2
global_store_b16 v0, v1, s[4:5]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z5inoutPsS_s
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 20
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z5inoutPsS_s, .Lfunc_end0-_Z5inoutPsS_s
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 2
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 20
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z5inoutPsS_s
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z5inoutPsS_s.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00026e87_00000000-6_test.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z27__device_stub__Z5inoutPsS_sPsS_s
.type _Z27__device_stub__Z5inoutPsS_sPsS_s, @function
_Z27__device_stub__Z5inoutPsS_sPsS_s:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movw %dx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z5inoutPsS_s(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z27__device_stub__Z5inoutPsS_sPsS_s, .-_Z27__device_stub__Z5inoutPsS_sPsS_s
.globl _Z5inoutPsS_s
.type _Z5inoutPsS_s, @function
_Z5inoutPsS_s:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movswl %dx, %edx
call _Z27__device_stub__Z5inoutPsS_sPsS_s
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z5inoutPsS_s, .-_Z5inoutPsS_s
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z5inoutPsS_s"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z5inoutPsS_s(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "test.hip"
.globl _Z20__device_stub__inoutPsS_s # -- Begin function _Z20__device_stub__inoutPsS_s
.p2align 4, 0x90
.type _Z20__device_stub__inoutPsS_s,@function
_Z20__device_stub__inoutPsS_s: # @_Z20__device_stub__inoutPsS_s
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movw %dx, 14(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 14(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z5inoutPsS_s, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z20__device_stub__inoutPsS_s, .Lfunc_end0-_Z20__device_stub__inoutPsS_s
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z5inoutPsS_s, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z5inoutPsS_s,@object # @_Z5inoutPsS_s
.section .rodata,"a",@progbits
.globl _Z5inoutPsS_s
.p2align 3, 0x0
_Z5inoutPsS_s:
.quad _Z20__device_stub__inoutPsS_s
.size _Z5inoutPsS_s, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z5inoutPsS_s"
.size .L__unnamed_1, 14
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z20__device_stub__inoutPsS_s
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z5inoutPsS_s
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include <stdio.h>
#include <ctime>
__global__ void
setup_random_kernel(curandState *state, int length, int offset) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < length) {
curand_init((unsigned long long) clock(), idx, 0, &state[idx]);
}
}
__global__ void
get_random_array(curandState *state, int length, double *out_array) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < length) {
curandState localState = state[idx];
out_array[idx] = curand_uniform_double(&localState);
}
}
/*
* PHASE ONE
*/
__global__ void
phase_one_shift(double *block_b, double *block_i, double *block_n, int *shift, curandState *state, int length) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// Compute block and compute its offset
if (idx < length) {
curandState localState = state[idx];
shift[idx] = (int) __double2int_rn(
block_i[idx] + __double2int_rd(curand_uniform(&localState) * block_b[idx]) * block_n[idx]);
state[idx] = localState;
}
}
__global__ void
phase_one_i(int *i, double *block_b, double *block_i, double *block_n, int *shift, curandState *state, int length) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// Choose first node
if (idx < length) {
curandState localState = state[idx];
i[idx] = (int) __double2int_rn(__double2int_rd(curand_uniform(&localState) * block_n[idx]) + shift[idx]);
state[idx] = localState;
}
}
__global__ void
phase_one_j(int *i, int *j, double *block_b, double *block_i, double *block_n, int *shift,
curandState *state, int length) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < length) {
curandState localState = state[idx];
// Choose second node
// "Without replacement"
j[idx] = (int) __double2int_rn(__double2int_rd(curand_uniform(&localState) * (block_n[idx] - 1)) + shift[idx]);
// Remove loops
if (j[idx] >= i[idx]) {
++j[idx];
}
state[idx] = localState;
}
}
/*
* PHASE TWO
*/
__global__ void
phase_two_fill(double *phase_two_shift_fill, double *phase_two_sz_fill, double *phase_two_fill,
curandState *state, int length) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < length) {
curandState localState = state[idx];
phase_two_fill[idx] =
phase_two_shift_fill[idx] + __double2int_rd(curand_uniform(&localState) * phase_two_sz_fill[idx]);
state[idx] = localState;
}
}
__global__ void
phase_two_bulk(double *phase_two_shift_bulk, double *phase_two_sz_bulk, double *phase_two_bulk,
curandState *state, int length) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < length) {
curandState localState = state[idx];
phase_two_bulk[idx] =
phase_two_shift_bulk[idx] + __double2int_rd(curand_uniform(&localState) * phase_two_sz_bulk[idx]);
state[idx] = localState;
}
}
__global__ void
phase_two_d(double *phase_two_fill, double *phase_two_bulk, int *phase_two, double *phase_two_rd_fill,
curandState *state, int length) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < length) {
curandState localState = state[idx];
if (curand_uniform(&localState) < phase_two_rd_fill[idx]) {
phase_two[idx] = (int) __double2int_rn(phase_two_fill[idx]);
} else {
phase_two[idx] = (int) __double2int_rn(phase_two_bulk[idx]);
}
state[idx] = localState;
}
} | .file "tmpxft_00028b01_00000000-6_PhasesKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2273:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2273:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z59__device_stub__Z19setup_random_kernelP17curandStateXORWOWiiP17curandStateXORWOWii
.type _Z59__device_stub__Z19setup_random_kernelP17curandStateXORWOWiiP17curandStateXORWOWii, @function
_Z59__device_stub__Z19setup_random_kernelP17curandStateXORWOWiiP17curandStateXORWOWii:
.LFB2295:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z19setup_random_kernelP17curandStateXORWOWii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2295:
.size _Z59__device_stub__Z19setup_random_kernelP17curandStateXORWOWiiP17curandStateXORWOWii, .-_Z59__device_stub__Z19setup_random_kernelP17curandStateXORWOWiiP17curandStateXORWOWii
.globl _Z19setup_random_kernelP17curandStateXORWOWii
.type _Z19setup_random_kernelP17curandStateXORWOWii, @function
_Z19setup_random_kernelP17curandStateXORWOWii:
.LFB2296:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z59__device_stub__Z19setup_random_kernelP17curandStateXORWOWiiP17curandStateXORWOWii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2296:
.size _Z19setup_random_kernelP17curandStateXORWOWii, .-_Z19setup_random_kernelP17curandStateXORWOWii
.globl _Z57__device_stub__Z16get_random_arrayP17curandStateXORWOWiPdP17curandStateXORWOWiPd
.type _Z57__device_stub__Z16get_random_arrayP17curandStateXORWOWiPdP17curandStateXORWOWiPd, @function
_Z57__device_stub__Z16get_random_arrayP17curandStateXORWOWiPdP17curandStateXORWOWiPd:
.LFB2297:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movl %esi, 20(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z16get_random_arrayP17curandStateXORWOWiPd(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2297:
.size _Z57__device_stub__Z16get_random_arrayP17curandStateXORWOWiPdP17curandStateXORWOWiPd, .-_Z57__device_stub__Z16get_random_arrayP17curandStateXORWOWiPdP17curandStateXORWOWiPd
.globl _Z16get_random_arrayP17curandStateXORWOWiPd
.type _Z16get_random_arrayP17curandStateXORWOWiPd, @function
_Z16get_random_arrayP17curandStateXORWOWiPd:
.LFB2298:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z57__device_stub__Z16get_random_arrayP17curandStateXORWOWiPdP17curandStateXORWOWiPd
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2298:
.size _Z16get_random_arrayP17curandStateXORWOWiPd, .-_Z16get_random_arrayP17curandStateXORWOWiPd
.globl _Z62__device_stub__Z15phase_one_shiftPdS_S_PiP17curandStateXORWOWiPdS_S_PiP17curandStateXORWOWi
.type _Z62__device_stub__Z15phase_one_shiftPdS_S_PiP17curandStateXORWOWiPdS_S_PiP17curandStateXORWOWi, @function
_Z62__device_stub__Z15phase_one_shiftPdS_S_PiP17curandStateXORWOWiPdS_S_PiP17curandStateXORWOWi:
.LFB2299:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movq %r8, 8(%rsp)
movl %r9d, 4(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
leaq 4(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z15phase_one_shiftPdS_S_PiP17curandStateXORWOWi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2299:
.size _Z62__device_stub__Z15phase_one_shiftPdS_S_PiP17curandStateXORWOWiPdS_S_PiP17curandStateXORWOWi, .-_Z62__device_stub__Z15phase_one_shiftPdS_S_PiP17curandStateXORWOWiPdS_S_PiP17curandStateXORWOWi
.globl _Z15phase_one_shiftPdS_S_PiP17curandStateXORWOWi
.type _Z15phase_one_shiftPdS_S_PiP17curandStateXORWOWi, @function
_Z15phase_one_shiftPdS_S_PiP17curandStateXORWOWi:
.LFB2300:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z62__device_stub__Z15phase_one_shiftPdS_S_PiP17curandStateXORWOWiPdS_S_PiP17curandStateXORWOWi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2300:
.size _Z15phase_one_shiftPdS_S_PiP17curandStateXORWOWi, .-_Z15phase_one_shiftPdS_S_PiP17curandStateXORWOWi
.globl _Z62__device_stub__Z11phase_one_iPiPdS0_S0_S_P17curandStateXORWOWiPiPdS0_S0_S_P17curandStateXORWOWi
.type _Z62__device_stub__Z11phase_one_iPiPdS0_S0_S_P17curandStateXORWOWiPiPdS0_S0_S_P17curandStateXORWOWi, @function
_Z62__device_stub__Z11phase_one_iPiPdS0_S0_S_P17curandStateXORWOWiPiPdS0_S0_S_P17curandStateXORWOWi:
.LFB2301:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movq %r8, 8(%rsp)
movq %r9, (%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
movq %rsp, %rax
movq %rax, 152(%rsp)
leaq 192(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z11phase_one_iPiPdS0_S0_S_P17curandStateXORWOWi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2301:
.size _Z62__device_stub__Z11phase_one_iPiPdS0_S0_S_P17curandStateXORWOWiPiPdS0_S0_S_P17curandStateXORWOWi, .-_Z62__device_stub__Z11phase_one_iPiPdS0_S0_S_P17curandStateXORWOWiPiPdS0_S0_S_P17curandStateXORWOWi
.globl _Z11phase_one_iPiPdS0_S0_S_P17curandStateXORWOWi
.type _Z11phase_one_iPiPdS0_S0_S_P17curandStateXORWOWi, @function
_Z11phase_one_iPiPdS0_S0_S_P17curandStateXORWOWi:
.LFB2302:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z62__device_stub__Z11phase_one_iPiPdS0_S0_S_P17curandStateXORWOWiPiPdS0_S0_S_P17curandStateXORWOWi
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2302:
.size _Z11phase_one_iPiPdS0_S0_S_P17curandStateXORWOWi, .-_Z11phase_one_iPiPdS0_S0_S_P17curandStateXORWOWi
.globl _Z64__device_stub__Z11phase_one_jPiS_PdS0_S0_S_P17curandStateXORWOWiPiS_PdS0_S0_S_P17curandStateXORWOWi
.type _Z64__device_stub__Z11phase_one_jPiS_PdS0_S0_S_P17curandStateXORWOWiPiS_PdS0_S0_S_P17curandStateXORWOWi, @function
_Z64__device_stub__Z11phase_one_jPiS_PdS0_S0_S_P17curandStateXORWOWiPiS_PdS0_S0_S_P17curandStateXORWOWi:
.LFB2303:
.cfi_startproc
endbr64
subq $216, %rsp
.cfi_def_cfa_offset 224
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
movq %rdx, 40(%rsp)
movq %rcx, 32(%rsp)
movq %r8, 24(%rsp)
movq %r9, 16(%rsp)
movq 224(%rsp), %rax
movq %rax, 8(%rsp)
movq %fs:40, %rax
movq %rax, 200(%rsp)
xorl %eax, %eax
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rax
movq %rax, 144(%rsp)
leaq 32(%rsp), %rax
movq %rax, 152(%rsp)
leaq 24(%rsp), %rax
movq %rax, 160(%rsp)
leaq 16(%rsp), %rax
movq %rax, 168(%rsp)
leaq 8(%rsp), %rax
movq %rax, 176(%rsp)
leaq 232(%rsp), %rax
movq %rax, 184(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L39
.L35:
movq 200(%rsp), %rax
subq %fs:40, %rax
jne .L40
addq $216, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L39:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 232
pushq 72(%rsp)
.cfi_def_cfa_offset 240
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z11phase_one_jPiS_PdS0_S0_S_P17curandStateXORWOWi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 224
jmp .L35
.L40:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2303:
.size _Z64__device_stub__Z11phase_one_jPiS_PdS0_S0_S_P17curandStateXORWOWiPiS_PdS0_S0_S_P17curandStateXORWOWi, .-_Z64__device_stub__Z11phase_one_jPiS_PdS0_S0_S_P17curandStateXORWOWiPiS_PdS0_S0_S_P17curandStateXORWOWi
.globl _Z11phase_one_jPiS_PdS0_S0_S_P17curandStateXORWOWi
.type _Z11phase_one_jPiS_PdS0_S0_S_P17curandStateXORWOWi, @function
_Z11phase_one_jPiS_PdS0_S0_S_P17curandStateXORWOWi:
.LFB2304:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z64__device_stub__Z11phase_one_jPiS_PdS0_S0_S_P17curandStateXORWOWiPiS_PdS0_S0_S_P17curandStateXORWOWi
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2304:
.size _Z11phase_one_jPiS_PdS0_S0_S_P17curandStateXORWOWi, .-_Z11phase_one_jPiS_PdS0_S0_S_P17curandStateXORWOWi
.globl _Z59__device_stub__Z14phase_two_fillPdS_S_P17curandStateXORWOWiPdS_S_P17curandStateXORWOWi
.type _Z59__device_stub__Z14phase_two_fillPdS_S_P17curandStateXORWOWiPdS_S_P17curandStateXORWOWi, @function
_Z59__device_stub__Z14phase_two_fillPdS_S_P17curandStateXORWOWiPdS_S_P17curandStateXORWOWi:
.LFB2305:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L47
.L43:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L48
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L47:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 184
pushq 56(%rsp)
.cfi_def_cfa_offset 192
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z14phase_two_fillPdS_S_P17curandStateXORWOWi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L43
.L48:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2305:
.size _Z59__device_stub__Z14phase_two_fillPdS_S_P17curandStateXORWOWiPdS_S_P17curandStateXORWOWi, .-_Z59__device_stub__Z14phase_two_fillPdS_S_P17curandStateXORWOWiPdS_S_P17curandStateXORWOWi
.globl _Z14phase_two_fillPdS_S_P17curandStateXORWOWi
.type _Z14phase_two_fillPdS_S_P17curandStateXORWOWi, @function
_Z14phase_two_fillPdS_S_P17curandStateXORWOWi:
.LFB2306:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z59__device_stub__Z14phase_two_fillPdS_S_P17curandStateXORWOWiPdS_S_P17curandStateXORWOWi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2306:
.size _Z14phase_two_fillPdS_S_P17curandStateXORWOWi, .-_Z14phase_two_fillPdS_S_P17curandStateXORWOWi
.globl _Z59__device_stub__Z14phase_two_bulkPdS_S_P17curandStateXORWOWiPdS_S_P17curandStateXORWOWi
.type _Z59__device_stub__Z14phase_two_bulkPdS_S_P17curandStateXORWOWiPdS_S_P17curandStateXORWOWi, @function
_Z59__device_stub__Z14phase_two_bulkPdS_S_P17curandStateXORWOWiPdS_S_P17curandStateXORWOWi:
.LFB2307:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L55
.L51:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L56
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L55:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 184
pushq 56(%rsp)
.cfi_def_cfa_offset 192
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z14phase_two_bulkPdS_S_P17curandStateXORWOWi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L51
.L56:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2307:
.size _Z59__device_stub__Z14phase_two_bulkPdS_S_P17curandStateXORWOWiPdS_S_P17curandStateXORWOWi, .-_Z59__device_stub__Z14phase_two_bulkPdS_S_P17curandStateXORWOWiPdS_S_P17curandStateXORWOWi
.globl _Z14phase_two_bulkPdS_S_P17curandStateXORWOWi
.type _Z14phase_two_bulkPdS_S_P17curandStateXORWOWi, @function
_Z14phase_two_bulkPdS_S_P17curandStateXORWOWi:
.LFB2308:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z59__device_stub__Z14phase_two_bulkPdS_S_P17curandStateXORWOWiPdS_S_P17curandStateXORWOWi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2308:
.size _Z14phase_two_bulkPdS_S_P17curandStateXORWOWi, .-_Z14phase_two_bulkPdS_S_P17curandStateXORWOWi
.globl _Z58__device_stub__Z11phase_two_dPdS_PiS_P17curandStateXORWOWiPdS_PiS_P17curandStateXORWOWi
.type _Z58__device_stub__Z11phase_two_dPdS_PiS_P17curandStateXORWOWiPdS_PiS_P17curandStateXORWOWi, @function
_Z58__device_stub__Z11phase_two_dPdS_PiS_P17curandStateXORWOWiPdS_PiS_P17curandStateXORWOWi:
.LFB2309:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movq %r8, 8(%rsp)
movl %r9d, 4(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
leaq 4(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L63
.L59:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L64
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L63:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z11phase_two_dPdS_PiS_P17curandStateXORWOWi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L59
.L64:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2309:
.size _Z58__device_stub__Z11phase_two_dPdS_PiS_P17curandStateXORWOWiPdS_PiS_P17curandStateXORWOWi, .-_Z58__device_stub__Z11phase_two_dPdS_PiS_P17curandStateXORWOWiPdS_PiS_P17curandStateXORWOWi
.globl _Z11phase_two_dPdS_PiS_P17curandStateXORWOWi
.type _Z11phase_two_dPdS_PiS_P17curandStateXORWOWi, @function
_Z11phase_two_dPdS_PiS_P17curandStateXORWOWi:
.LFB2310:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z58__device_stub__Z11phase_two_dPdS_PiS_P17curandStateXORWOWiPdS_PiS_P17curandStateXORWOWi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2310:
.size _Z11phase_two_dPdS_PiS_P17curandStateXORWOWi, .-_Z11phase_two_dPdS_PiS_P17curandStateXORWOWi
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z11phase_two_dPdS_PiS_P17curandStateXORWOWi"
.align 8
.LC1:
.string "_Z14phase_two_bulkPdS_S_P17curandStateXORWOWi"
.align 8
.LC2:
.string "_Z14phase_two_fillPdS_S_P17curandStateXORWOWi"
.align 8
.LC3:
.string "_Z11phase_one_jPiS_PdS0_S0_S_P17curandStateXORWOWi"
.align 8
.LC4:
.string "_Z11phase_one_iPiPdS0_S0_S_P17curandStateXORWOWi"
.align 8
.LC5:
.string "_Z15phase_one_shiftPdS_S_PiP17curandStateXORWOWi"
.align 8
.LC6:
.string "_Z16get_random_arrayP17curandStateXORWOWiPd"
.align 8
.LC7:
.string "_Z19setup_random_kernelP17curandStateXORWOWii"
.section .rodata.str1.1,"aMS",@progbits,1
.LC8:
.string "precalc_xorwow_matrix"
.LC9:
.string "precalc_xorwow_offset_matrix"
.LC10:
.string "mrg32k3aM1"
.LC11:
.string "mrg32k3aM2"
.LC12:
.string "mrg32k3aM1SubSeq"
.LC13:
.string "mrg32k3aM2SubSeq"
.LC14:
.string "mrg32k3aM1Seq"
.LC15:
.string "mrg32k3aM2Seq"
.LC16:
.string "__cr_lgamma_table"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2312:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z11phase_two_dPdS_PiS_P17curandStateXORWOWi(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z14phase_two_bulkPdS_S_P17curandStateXORWOWi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z14phase_two_fillPdS_S_P17curandStateXORWOWi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z11phase_one_jPiS_PdS0_S0_S_P17curandStateXORWOWi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z11phase_one_iPiPdS0_S0_S_P17curandStateXORWOWi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z15phase_one_shiftPdS_S_PiP17curandStateXORWOWi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z16get_random_arrayP17curandStateXORWOWiPd(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _Z19setup_random_kernelP17curandStateXORWOWii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $102400, %r9d
movl $0, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _ZL21precalc_xorwow_matrix(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $102400, %r9d
movl $0, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq _ZL28precalc_xorwow_offset_matrix(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10mrg32k3aM1(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10mrg32k3aM2(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2016, %r9d
movl $0, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _ZL16mrg32k3aM1SubSeq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2016, %r9d
movl $0, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _ZL16mrg32k3aM2SubSeq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC14(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13mrg32k3aM1Seq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13mrg32k3aM2Seq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $72, %r9d
movl $0, %r8d
leaq .LC16(%rip), %rdx
movq %rdx, %rcx
leaq _ZL17__cr_lgamma_table(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2312:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL17__cr_lgamma_table
.comm _ZL17__cr_lgamma_table,72,32
.local _ZL13mrg32k3aM2Seq
.comm _ZL13mrg32k3aM2Seq,2304,32
.local _ZL13mrg32k3aM1Seq
.comm _ZL13mrg32k3aM1Seq,2304,32
.local _ZL16mrg32k3aM2SubSeq
.comm _ZL16mrg32k3aM2SubSeq,2016,32
.local _ZL16mrg32k3aM1SubSeq
.comm _ZL16mrg32k3aM1SubSeq,2016,32
.local _ZL10mrg32k3aM2
.comm _ZL10mrg32k3aM2,2304,32
.local _ZL10mrg32k3aM1
.comm _ZL10mrg32k3aM1,2304,32
.local _ZL28precalc_xorwow_offset_matrix
.comm _ZL28precalc_xorwow_offset_matrix,102400,32
.local _ZL21precalc_xorwow_matrix
.comm _ZL21precalc_xorwow_matrix,102400,32
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include <stdio.h>
#include <ctime>
__global__ void
setup_random_kernel(curandState *state, int length, int offset) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < length) {
curand_init((unsigned long long) clock(), idx, 0, &state[idx]);
}
}
__global__ void
get_random_array(curandState *state, int length, double *out_array) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < length) {
curandState localState = state[idx];
out_array[idx] = curand_uniform_double(&localState);
}
}
/*
* PHASE ONE
*/
__global__ void
phase_one_shift(double *block_b, double *block_i, double *block_n, int *shift, curandState *state, int length) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// Compute block and compute its offset
if (idx < length) {
curandState localState = state[idx];
shift[idx] = (int) __double2int_rn(
block_i[idx] + __double2int_rd(curand_uniform(&localState) * block_b[idx]) * block_n[idx]);
state[idx] = localState;
}
}
__global__ void
phase_one_i(int *i, double *block_b, double *block_i, double *block_n, int *shift, curandState *state, int length) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// Choose first node
if (idx < length) {
curandState localState = state[idx];
i[idx] = (int) __double2int_rn(__double2int_rd(curand_uniform(&localState) * block_n[idx]) + shift[idx]);
state[idx] = localState;
}
}
__global__ void
phase_one_j(int *i, int *j, double *block_b, double *block_i, double *block_n, int *shift,
curandState *state, int length) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < length) {
curandState localState = state[idx];
// Choose second node
// "Without replacement"
j[idx] = (int) __double2int_rn(__double2int_rd(curand_uniform(&localState) * (block_n[idx] - 1)) + shift[idx]);
// Remove loops
if (j[idx] >= i[idx]) {
++j[idx];
}
state[idx] = localState;
}
}
/*
* PHASE TWO
*/
__global__ void
phase_two_fill(double *phase_two_shift_fill, double *phase_two_sz_fill, double *phase_two_fill,
curandState *state, int length) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < length) {
curandState localState = state[idx];
phase_two_fill[idx] =
phase_two_shift_fill[idx] + __double2int_rd(curand_uniform(&localState) * phase_two_sz_fill[idx]);
state[idx] = localState;
}
}
__global__ void
phase_two_bulk(double *phase_two_shift_bulk, double *phase_two_sz_bulk, double *phase_two_bulk,
curandState *state, int length) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < length) {
curandState localState = state[idx];
phase_two_bulk[idx] =
phase_two_shift_bulk[idx] + __double2int_rd(curand_uniform(&localState) * phase_two_sz_bulk[idx]);
state[idx] = localState;
}
}
__global__ void
phase_two_d(double *phase_two_fill, double *phase_two_bulk, int *phase_two, double *phase_two_rd_fill,
curandState *state, int length) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < length) {
curandState localState = state[idx];
if (curand_uniform(&localState) < phase_two_rd_fill[idx]) {
phase_two[idx] = (int) __double2int_rn(phase_two_fill[idx]);
} else {
phase_two[idx] = (int) __double2int_rn(phase_two_bulk[idx]);
}
state[idx] = localState;
}
} | #include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <stdio.h>
#include <ctime>
__global__ void
setup_random_kernel(hiprandState *state, int length, int offset) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < length) {
hiprand_init((unsigned long long) clock(), idx, 0, &state[idx]);
}
}
__global__ void
get_random_array(hiprandState *state, int length, double *out_array) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < length) {
hiprandState localState = state[idx];
out_array[idx] = hiprand_uniform_double(&localState);
}
}
/*
* PHASE ONE
*/
__global__ void
phase_one_shift(double *block_b, double *block_i, double *block_n, int *shift, hiprandState *state, int length) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// Compute block and compute its offset
if (idx < length) {
hiprandState localState = state[idx];
shift[idx] = (int) __double2int_rn(
block_i[idx] + __double2int_rd(hiprand_uniform(&localState) * block_b[idx]) * block_n[idx]);
state[idx] = localState;
}
}
__global__ void
phase_one_i(int *i, double *block_b, double *block_i, double *block_n, int *shift, hiprandState *state, int length) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// Choose first node
if (idx < length) {
hiprandState localState = state[idx];
i[idx] = (int) __double2int_rn(__double2int_rd(hiprand_uniform(&localState) * block_n[idx]) + shift[idx]);
state[idx] = localState;
}
}
__global__ void
phase_one_j(int *i, int *j, double *block_b, double *block_i, double *block_n, int *shift,
hiprandState *state, int length) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < length) {
hiprandState localState = state[idx];
// Choose second node
// "Without replacement"
j[idx] = (int) __double2int_rn(__double2int_rd(hiprand_uniform(&localState) * (block_n[idx] - 1)) + shift[idx]);
// Remove loops
if (j[idx] >= i[idx]) {
++j[idx];
}
state[idx] = localState;
}
}
/*
* PHASE TWO
*/
__global__ void
phase_two_fill(double *phase_two_shift_fill, double *phase_two_sz_fill, double *phase_two_fill,
hiprandState *state, int length) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < length) {
hiprandState localState = state[idx];
phase_two_fill[idx] =
phase_two_shift_fill[idx] + __double2int_rd(hiprand_uniform(&localState) * phase_two_sz_fill[idx]);
state[idx] = localState;
}
}
__global__ void
phase_two_bulk(double *phase_two_shift_bulk, double *phase_two_sz_bulk, double *phase_two_bulk,
hiprandState *state, int length) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < length) {
hiprandState localState = state[idx];
phase_two_bulk[idx] =
phase_two_shift_bulk[idx] + __double2int_rd(hiprand_uniform(&localState) * phase_two_sz_bulk[idx]);
state[idx] = localState;
}
}
__global__ void
phase_two_d(double *phase_two_fill, double *phase_two_bulk, int *phase_two, double *phase_two_rd_fill,
hiprandState *state, int length) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < length) {
hiprandState localState = state[idx];
if (hiprand_uniform(&localState) < phase_two_rd_fill[idx]) {
phase_two[idx] = (int) __double2int_rn(phase_two_fill[idx]);
} else {
phase_two[idx] = (int) __double2int_rn(phase_two_bulk[idx]);
}
state[idx] = localState;
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <stdio.h>
#include <ctime>
__global__ void
setup_random_kernel(hiprandState *state, int length, int offset) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < length) {
hiprand_init((unsigned long long) clock(), idx, 0, &state[idx]);
}
}
__global__ void
get_random_array(hiprandState *state, int length, double *out_array) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < length) {
hiprandState localState = state[idx];
out_array[idx] = hiprand_uniform_double(&localState);
}
}
/*
* PHASE ONE
*/
__global__ void
phase_one_shift(double *block_b, double *block_i, double *block_n, int *shift, hiprandState *state, int length) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// Compute block and compute its offset
if (idx < length) {
hiprandState localState = state[idx];
shift[idx] = (int) __double2int_rn(
block_i[idx] + __double2int_rd(hiprand_uniform(&localState) * block_b[idx]) * block_n[idx]);
state[idx] = localState;
}
}
__global__ void
phase_one_i(int *i, double *block_b, double *block_i, double *block_n, int *shift, hiprandState *state, int length) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// Choose first node
if (idx < length) {
hiprandState localState = state[idx];
i[idx] = (int) __double2int_rn(__double2int_rd(hiprand_uniform(&localState) * block_n[idx]) + shift[idx]);
state[idx] = localState;
}
}
__global__ void
phase_one_j(int *i, int *j, double *block_b, double *block_i, double *block_n, int *shift,
hiprandState *state, int length) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < length) {
hiprandState localState = state[idx];
// Choose second node
// "Without replacement"
j[idx] = (int) __double2int_rn(__double2int_rd(hiprand_uniform(&localState) * (block_n[idx] - 1)) + shift[idx]);
// Remove loops
if (j[idx] >= i[idx]) {
++j[idx];
}
state[idx] = localState;
}
}
/*
* PHASE TWO
*/
__global__ void
phase_two_fill(double *phase_two_shift_fill, double *phase_two_sz_fill, double *phase_two_fill,
hiprandState *state, int length) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < length) {
hiprandState localState = state[idx];
phase_two_fill[idx] =
phase_two_shift_fill[idx] + __double2int_rd(hiprand_uniform(&localState) * phase_two_sz_fill[idx]);
state[idx] = localState;
}
}
__global__ void
phase_two_bulk(double *phase_two_shift_bulk, double *phase_two_sz_bulk, double *phase_two_bulk,
hiprandState *state, int length) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < length) {
hiprandState localState = state[idx];
phase_two_bulk[idx] =
phase_two_shift_bulk[idx] + __double2int_rd(hiprand_uniform(&localState) * phase_two_sz_bulk[idx]);
state[idx] = localState;
}
}
__global__ void
phase_two_d(double *phase_two_fill, double *phase_two_bulk, int *phase_two, double *phase_two_rd_fill,
hiprandState *state, int length) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < length) {
hiprandState localState = state[idx];
if (hiprand_uniform(&localState) < phase_two_rd_fill[idx]) {
phase_two[idx] = (int) __double2int_rn(phase_two_fill[idx]);
} else {
phase_two[idx] = (int) __double2int_rn(phase_two_bulk[idx]);
}
state[idx] = localState;
}
} | .text
.file "PhasesKernel.hip"
.globl _Z34__device_stub__setup_random_kernelP12hiprandStateii # -- Begin function _Z34__device_stub__setup_random_kernelP12hiprandStateii
.p2align 4, 0x90
.type _Z34__device_stub__setup_random_kernelP12hiprandStateii,@function
_Z34__device_stub__setup_random_kernelP12hiprandStateii: # @_Z34__device_stub__setup_random_kernelP12hiprandStateii
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
movq %rsp, %rax
movq %rax, 80(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z19setup_random_kernelP12hiprandStateii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z34__device_stub__setup_random_kernelP12hiprandStateii, .Lfunc_end0-_Z34__device_stub__setup_random_kernelP12hiprandStateii
.cfi_endproc
# -- End function
.globl _Z31__device_stub__get_random_arrayP12hiprandStateiPd # -- Begin function _Z31__device_stub__get_random_arrayP12hiprandStateiPd
.p2align 4, 0x90
.type _Z31__device_stub__get_random_arrayP12hiprandStateiPd,@function
_Z31__device_stub__get_random_arrayP12hiprandStateiPd: # @_Z31__device_stub__get_random_arrayP12hiprandStateiPd
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movl %esi, 12(%rsp)
movq %rdx, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 12(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16get_random_arrayP12hiprandStateiPd, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z31__device_stub__get_random_arrayP12hiprandStateiPd, .Lfunc_end1-_Z31__device_stub__get_random_arrayP12hiprandStateiPd
.cfi_endproc
# -- End function
.globl _Z30__device_stub__phase_one_shiftPdS_S_PiP12hiprandStatei # -- Begin function _Z30__device_stub__phase_one_shiftPdS_S_PiP12hiprandStatei
.p2align 4, 0x90
.type _Z30__device_stub__phase_one_shiftPdS_S_PiP12hiprandStatei,@function
_Z30__device_stub__phase_one_shiftPdS_S_PiP12hiprandStatei: # @_Z30__device_stub__phase_one_shiftPdS_S_PiP12hiprandStatei
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movl %r9d, 4(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 4(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z15phase_one_shiftPdS_S_PiP12hiprandStatei, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end2:
.size _Z30__device_stub__phase_one_shiftPdS_S_PiP12hiprandStatei, .Lfunc_end2-_Z30__device_stub__phase_one_shiftPdS_S_PiP12hiprandStatei
.cfi_endproc
# -- End function
.globl _Z26__device_stub__phase_one_iPiPdS0_S0_S_P12hiprandStatei # -- Begin function _Z26__device_stub__phase_one_iPiPdS0_S0_S_P12hiprandStatei
.p2align 4, 0x90
.type _Z26__device_stub__phase_one_iPiPdS0_S0_S_P12hiprandStatei,@function
_Z26__device_stub__phase_one_iPiPdS0_S0_S_P12hiprandStatei: # @_Z26__device_stub__phase_one_iPiPdS0_S0_S_P12hiprandStatei
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movq %r9, 48(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z11phase_one_iPiPdS0_S0_S_P12hiprandStatei, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end3:
.size _Z26__device_stub__phase_one_iPiPdS0_S0_S_P12hiprandStatei, .Lfunc_end3-_Z26__device_stub__phase_one_iPiPdS0_S0_S_P12hiprandStatei
.cfi_endproc
# -- End function
.globl _Z26__device_stub__phase_one_jPiS_PdS0_S0_S_P12hiprandStatei # -- Begin function _Z26__device_stub__phase_one_jPiS_PdS0_S0_S_P12hiprandStatei
.p2align 4, 0x90
.type _Z26__device_stub__phase_one_jPiS_PdS0_S0_S_P12hiprandStatei,@function
_Z26__device_stub__phase_one_jPiS_PdS0_S0_S_P12hiprandStatei: # @_Z26__device_stub__phase_one_jPiS_PdS0_S0_S_P12hiprandStatei
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movq %r9, 48(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
leaq 184(%rsp), %rax
movq %rax, 152(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z11phase_one_jPiS_PdS0_S0_S_P12hiprandStatei, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end4:
.size _Z26__device_stub__phase_one_jPiS_PdS0_S0_S_P12hiprandStatei, .Lfunc_end4-_Z26__device_stub__phase_one_jPiS_PdS0_S0_S_P12hiprandStatei
.cfi_endproc
# -- End function
.globl _Z29__device_stub__phase_two_fillPdS_S_P12hiprandStatei # -- Begin function _Z29__device_stub__phase_two_fillPdS_S_P12hiprandStatei
.p2align 4, 0x90
.type _Z29__device_stub__phase_two_fillPdS_S_P12hiprandStatei,@function
_Z29__device_stub__phase_two_fillPdS_S_P12hiprandStatei: # @_Z29__device_stub__phase_two_fillPdS_S_P12hiprandStatei
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z14phase_two_fillPdS_S_P12hiprandStatei, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end5:
.size _Z29__device_stub__phase_two_fillPdS_S_P12hiprandStatei, .Lfunc_end5-_Z29__device_stub__phase_two_fillPdS_S_P12hiprandStatei
.cfi_endproc
# -- End function
.globl _Z29__device_stub__phase_two_bulkPdS_S_P12hiprandStatei # -- Begin function _Z29__device_stub__phase_two_bulkPdS_S_P12hiprandStatei
.p2align 4, 0x90
.type _Z29__device_stub__phase_two_bulkPdS_S_P12hiprandStatei,@function
_Z29__device_stub__phase_two_bulkPdS_S_P12hiprandStatei: # @_Z29__device_stub__phase_two_bulkPdS_S_P12hiprandStatei
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z14phase_two_bulkPdS_S_P12hiprandStatei, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end6:
.size _Z29__device_stub__phase_two_bulkPdS_S_P12hiprandStatei, .Lfunc_end6-_Z29__device_stub__phase_two_bulkPdS_S_P12hiprandStatei
.cfi_endproc
# -- End function
.globl _Z26__device_stub__phase_two_dPdS_PiS_P12hiprandStatei # -- Begin function _Z26__device_stub__phase_two_dPdS_PiS_P12hiprandStatei
.p2align 4, 0x90
.type _Z26__device_stub__phase_two_dPdS_PiS_P12hiprandStatei,@function
_Z26__device_stub__phase_two_dPdS_PiS_P12hiprandStatei: # @_Z26__device_stub__phase_two_dPdS_PiS_P12hiprandStatei
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movl %r9d, 4(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 4(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z11phase_two_dPdS_PiS_P12hiprandStatei, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end7:
.size _Z26__device_stub__phase_two_dPdS_PiS_P12hiprandStatei, .Lfunc_end7-_Z26__device_stub__phase_two_dPdS_PiS_P12hiprandStatei
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB8_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB8_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z19setup_random_kernelP12hiprandStateii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16get_random_arrayP12hiprandStateiPd, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z15phase_one_shiftPdS_S_PiP12hiprandStatei, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11phase_one_iPiPdS0_S0_S_P12hiprandStatei, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11phase_one_jPiS_PdS0_S0_S_P12hiprandStatei, %esi
movl $.L__unnamed_5, %edx
movl $.L__unnamed_5, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14phase_two_fillPdS_S_P12hiprandStatei, %esi
movl $.L__unnamed_6, %edx
movl $.L__unnamed_6, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14phase_two_bulkPdS_S_P12hiprandStatei, %esi
movl $.L__unnamed_7, %edx
movl $.L__unnamed_7, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11phase_two_dPdS_PiS_P12hiprandStatei, %esi
movl $.L__unnamed_8, %edx
movl $.L__unnamed_8, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end8:
.size __hip_module_ctor, .Lfunc_end8-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB9_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB9_2:
retq
.Lfunc_end9:
.size __hip_module_dtor, .Lfunc_end9-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z19setup_random_kernelP12hiprandStateii,@object # @_Z19setup_random_kernelP12hiprandStateii
.section .rodata,"a",@progbits
.globl _Z19setup_random_kernelP12hiprandStateii
.p2align 3, 0x0
_Z19setup_random_kernelP12hiprandStateii:
.quad _Z34__device_stub__setup_random_kernelP12hiprandStateii
.size _Z19setup_random_kernelP12hiprandStateii, 8
.type _Z16get_random_arrayP12hiprandStateiPd,@object # @_Z16get_random_arrayP12hiprandStateiPd
.globl _Z16get_random_arrayP12hiprandStateiPd
.p2align 3, 0x0
_Z16get_random_arrayP12hiprandStateiPd:
.quad _Z31__device_stub__get_random_arrayP12hiprandStateiPd
.size _Z16get_random_arrayP12hiprandStateiPd, 8
.type _Z15phase_one_shiftPdS_S_PiP12hiprandStatei,@object # @_Z15phase_one_shiftPdS_S_PiP12hiprandStatei
.globl _Z15phase_one_shiftPdS_S_PiP12hiprandStatei
.p2align 3, 0x0
_Z15phase_one_shiftPdS_S_PiP12hiprandStatei:
.quad _Z30__device_stub__phase_one_shiftPdS_S_PiP12hiprandStatei
.size _Z15phase_one_shiftPdS_S_PiP12hiprandStatei, 8
.type _Z11phase_one_iPiPdS0_S0_S_P12hiprandStatei,@object # @_Z11phase_one_iPiPdS0_S0_S_P12hiprandStatei
.globl _Z11phase_one_iPiPdS0_S0_S_P12hiprandStatei
.p2align 3, 0x0
_Z11phase_one_iPiPdS0_S0_S_P12hiprandStatei:
.quad _Z26__device_stub__phase_one_iPiPdS0_S0_S_P12hiprandStatei
.size _Z11phase_one_iPiPdS0_S0_S_P12hiprandStatei, 8
.type _Z11phase_one_jPiS_PdS0_S0_S_P12hiprandStatei,@object # @_Z11phase_one_jPiS_PdS0_S0_S_P12hiprandStatei
.globl _Z11phase_one_jPiS_PdS0_S0_S_P12hiprandStatei
.p2align 3, 0x0
_Z11phase_one_jPiS_PdS0_S0_S_P12hiprandStatei:
.quad _Z26__device_stub__phase_one_jPiS_PdS0_S0_S_P12hiprandStatei
.size _Z11phase_one_jPiS_PdS0_S0_S_P12hiprandStatei, 8
.type _Z14phase_two_fillPdS_S_P12hiprandStatei,@object # @_Z14phase_two_fillPdS_S_P12hiprandStatei
.globl _Z14phase_two_fillPdS_S_P12hiprandStatei
.p2align 3, 0x0
_Z14phase_two_fillPdS_S_P12hiprandStatei:
.quad _Z29__device_stub__phase_two_fillPdS_S_P12hiprandStatei
.size _Z14phase_two_fillPdS_S_P12hiprandStatei, 8
.type _Z14phase_two_bulkPdS_S_P12hiprandStatei,@object # @_Z14phase_two_bulkPdS_S_P12hiprandStatei
.globl _Z14phase_two_bulkPdS_S_P12hiprandStatei
.p2align 3, 0x0
_Z14phase_two_bulkPdS_S_P12hiprandStatei:
.quad _Z29__device_stub__phase_two_bulkPdS_S_P12hiprandStatei
.size _Z14phase_two_bulkPdS_S_P12hiprandStatei, 8
.type _Z11phase_two_dPdS_PiS_P12hiprandStatei,@object # @_Z11phase_two_dPdS_PiS_P12hiprandStatei
.globl _Z11phase_two_dPdS_PiS_P12hiprandStatei
.p2align 3, 0x0
_Z11phase_two_dPdS_PiS_P12hiprandStatei:
.quad _Z26__device_stub__phase_two_dPdS_PiS_P12hiprandStatei
.size _Z11phase_two_dPdS_PiS_P12hiprandStatei, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z19setup_random_kernelP12hiprandStateii"
.size .L__unnamed_1, 41
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z16get_random_arrayP12hiprandStateiPd"
.size .L__unnamed_2, 39
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z15phase_one_shiftPdS_S_PiP12hiprandStatei"
.size .L__unnamed_3, 44
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "_Z11phase_one_iPiPdS0_S0_S_P12hiprandStatei"
.size .L__unnamed_4, 44
.type .L__unnamed_5,@object # @4
.L__unnamed_5:
.asciz "_Z11phase_one_jPiS_PdS0_S0_S_P12hiprandStatei"
.size .L__unnamed_5, 46
.type .L__unnamed_6,@object # @5
.L__unnamed_6:
.asciz "_Z14phase_two_fillPdS_S_P12hiprandStatei"
.size .L__unnamed_6, 41
.type .L__unnamed_7,@object # @6
.L__unnamed_7:
.asciz "_Z14phase_two_bulkPdS_S_P12hiprandStatei"
.size .L__unnamed_7, 41
.type .L__unnamed_8,@object # @7
.L__unnamed_8:
.asciz "_Z11phase_two_dPdS_PiS_P12hiprandStatei"
.size .L__unnamed_8, 40
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z34__device_stub__setup_random_kernelP12hiprandStateii
.addrsig_sym _Z31__device_stub__get_random_arrayP12hiprandStateiPd
.addrsig_sym _Z30__device_stub__phase_one_shiftPdS_S_PiP12hiprandStatei
.addrsig_sym _Z26__device_stub__phase_one_iPiPdS0_S0_S_P12hiprandStatei
.addrsig_sym _Z26__device_stub__phase_one_jPiS_PdS0_S0_S_P12hiprandStatei
.addrsig_sym _Z29__device_stub__phase_two_fillPdS_S_P12hiprandStatei
.addrsig_sym _Z29__device_stub__phase_two_bulkPdS_S_P12hiprandStatei
.addrsig_sym _Z26__device_stub__phase_two_dPdS_PiS_P12hiprandStatei
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z19setup_random_kernelP12hiprandStateii
.addrsig_sym _Z16get_random_arrayP12hiprandStateiPd
.addrsig_sym _Z15phase_one_shiftPdS_S_PiP12hiprandStatei
.addrsig_sym _Z11phase_one_iPiPdS0_S0_S_P12hiprandStatei
.addrsig_sym _Z11phase_one_jPiS_PdS0_S0_S_P12hiprandStatei
.addrsig_sym _Z14phase_two_fillPdS_S_P12hiprandStatei
.addrsig_sym _Z14phase_two_bulkPdS_S_P12hiprandStatei
.addrsig_sym _Z11phase_two_dPdS_PiS_P12hiprandStatei
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00028b01_00000000-6_PhasesKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2273:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2273:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z59__device_stub__Z19setup_random_kernelP17curandStateXORWOWiiP17curandStateXORWOWii
.type _Z59__device_stub__Z19setup_random_kernelP17curandStateXORWOWiiP17curandStateXORWOWii, @function
_Z59__device_stub__Z19setup_random_kernelP17curandStateXORWOWiiP17curandStateXORWOWii:
.LFB2295:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z19setup_random_kernelP17curandStateXORWOWii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2295:
.size _Z59__device_stub__Z19setup_random_kernelP17curandStateXORWOWiiP17curandStateXORWOWii, .-_Z59__device_stub__Z19setup_random_kernelP17curandStateXORWOWiiP17curandStateXORWOWii
.globl _Z19setup_random_kernelP17curandStateXORWOWii
.type _Z19setup_random_kernelP17curandStateXORWOWii, @function
_Z19setup_random_kernelP17curandStateXORWOWii:
.LFB2296:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z59__device_stub__Z19setup_random_kernelP17curandStateXORWOWiiP17curandStateXORWOWii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2296:
.size _Z19setup_random_kernelP17curandStateXORWOWii, .-_Z19setup_random_kernelP17curandStateXORWOWii
.globl _Z57__device_stub__Z16get_random_arrayP17curandStateXORWOWiPdP17curandStateXORWOWiPd
.type _Z57__device_stub__Z16get_random_arrayP17curandStateXORWOWiPdP17curandStateXORWOWiPd, @function
_Z57__device_stub__Z16get_random_arrayP17curandStateXORWOWiPdP17curandStateXORWOWiPd:
.LFB2297:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movl %esi, 20(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z16get_random_arrayP17curandStateXORWOWiPd(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2297:
.size _Z57__device_stub__Z16get_random_arrayP17curandStateXORWOWiPdP17curandStateXORWOWiPd, .-_Z57__device_stub__Z16get_random_arrayP17curandStateXORWOWiPdP17curandStateXORWOWiPd
.globl _Z16get_random_arrayP17curandStateXORWOWiPd
.type _Z16get_random_arrayP17curandStateXORWOWiPd, @function
_Z16get_random_arrayP17curandStateXORWOWiPd:
.LFB2298:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z57__device_stub__Z16get_random_arrayP17curandStateXORWOWiPdP17curandStateXORWOWiPd
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2298:
.size _Z16get_random_arrayP17curandStateXORWOWiPd, .-_Z16get_random_arrayP17curandStateXORWOWiPd
.globl _Z62__device_stub__Z15phase_one_shiftPdS_S_PiP17curandStateXORWOWiPdS_S_PiP17curandStateXORWOWi
.type _Z62__device_stub__Z15phase_one_shiftPdS_S_PiP17curandStateXORWOWiPdS_S_PiP17curandStateXORWOWi, @function
_Z62__device_stub__Z15phase_one_shiftPdS_S_PiP17curandStateXORWOWiPdS_S_PiP17curandStateXORWOWi:
.LFB2299:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movq %r8, 8(%rsp)
movl %r9d, 4(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
leaq 4(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z15phase_one_shiftPdS_S_PiP17curandStateXORWOWi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2299:
.size _Z62__device_stub__Z15phase_one_shiftPdS_S_PiP17curandStateXORWOWiPdS_S_PiP17curandStateXORWOWi, .-_Z62__device_stub__Z15phase_one_shiftPdS_S_PiP17curandStateXORWOWiPdS_S_PiP17curandStateXORWOWi
.globl _Z15phase_one_shiftPdS_S_PiP17curandStateXORWOWi
.type _Z15phase_one_shiftPdS_S_PiP17curandStateXORWOWi, @function
_Z15phase_one_shiftPdS_S_PiP17curandStateXORWOWi:
.LFB2300:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z62__device_stub__Z15phase_one_shiftPdS_S_PiP17curandStateXORWOWiPdS_S_PiP17curandStateXORWOWi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2300:
.size _Z15phase_one_shiftPdS_S_PiP17curandStateXORWOWi, .-_Z15phase_one_shiftPdS_S_PiP17curandStateXORWOWi
.globl _Z62__device_stub__Z11phase_one_iPiPdS0_S0_S_P17curandStateXORWOWiPiPdS0_S0_S_P17curandStateXORWOWi
.type _Z62__device_stub__Z11phase_one_iPiPdS0_S0_S_P17curandStateXORWOWiPiPdS0_S0_S_P17curandStateXORWOWi, @function
_Z62__device_stub__Z11phase_one_iPiPdS0_S0_S_P17curandStateXORWOWiPiPdS0_S0_S_P17curandStateXORWOWi:
.LFB2301:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movq %r8, 8(%rsp)
movq %r9, (%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
movq %rsp, %rax
movq %rax, 152(%rsp)
leaq 192(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z11phase_one_iPiPdS0_S0_S_P17curandStateXORWOWi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2301:
.size _Z62__device_stub__Z11phase_one_iPiPdS0_S0_S_P17curandStateXORWOWiPiPdS0_S0_S_P17curandStateXORWOWi, .-_Z62__device_stub__Z11phase_one_iPiPdS0_S0_S_P17curandStateXORWOWiPiPdS0_S0_S_P17curandStateXORWOWi
.globl _Z11phase_one_iPiPdS0_S0_S_P17curandStateXORWOWi
.type _Z11phase_one_iPiPdS0_S0_S_P17curandStateXORWOWi, @function
_Z11phase_one_iPiPdS0_S0_S_P17curandStateXORWOWi:
.LFB2302:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z62__device_stub__Z11phase_one_iPiPdS0_S0_S_P17curandStateXORWOWiPiPdS0_S0_S_P17curandStateXORWOWi
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2302:
.size _Z11phase_one_iPiPdS0_S0_S_P17curandStateXORWOWi, .-_Z11phase_one_iPiPdS0_S0_S_P17curandStateXORWOWi
.globl _Z64__device_stub__Z11phase_one_jPiS_PdS0_S0_S_P17curandStateXORWOWiPiS_PdS0_S0_S_P17curandStateXORWOWi
.type _Z64__device_stub__Z11phase_one_jPiS_PdS0_S0_S_P17curandStateXORWOWiPiS_PdS0_S0_S_P17curandStateXORWOWi, @function
_Z64__device_stub__Z11phase_one_jPiS_PdS0_S0_S_P17curandStateXORWOWiPiS_PdS0_S0_S_P17curandStateXORWOWi:
.LFB2303:
.cfi_startproc
endbr64
subq $216, %rsp
.cfi_def_cfa_offset 224
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
movq %rdx, 40(%rsp)
movq %rcx, 32(%rsp)
movq %r8, 24(%rsp)
movq %r9, 16(%rsp)
movq 224(%rsp), %rax
movq %rax, 8(%rsp)
movq %fs:40, %rax
movq %rax, 200(%rsp)
xorl %eax, %eax
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rax
movq %rax, 144(%rsp)
leaq 32(%rsp), %rax
movq %rax, 152(%rsp)
leaq 24(%rsp), %rax
movq %rax, 160(%rsp)
leaq 16(%rsp), %rax
movq %rax, 168(%rsp)
leaq 8(%rsp), %rax
movq %rax, 176(%rsp)
leaq 232(%rsp), %rax
movq %rax, 184(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L39
.L35:
movq 200(%rsp), %rax
subq %fs:40, %rax
jne .L40
addq $216, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L39:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 232
pushq 72(%rsp)
.cfi_def_cfa_offset 240
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z11phase_one_jPiS_PdS0_S0_S_P17curandStateXORWOWi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 224
jmp .L35
.L40:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2303:
.size _Z64__device_stub__Z11phase_one_jPiS_PdS0_S0_S_P17curandStateXORWOWiPiS_PdS0_S0_S_P17curandStateXORWOWi, .-_Z64__device_stub__Z11phase_one_jPiS_PdS0_S0_S_P17curandStateXORWOWiPiS_PdS0_S0_S_P17curandStateXORWOWi
.globl _Z11phase_one_jPiS_PdS0_S0_S_P17curandStateXORWOWi
.type _Z11phase_one_jPiS_PdS0_S0_S_P17curandStateXORWOWi, @function
_Z11phase_one_jPiS_PdS0_S0_S_P17curandStateXORWOWi:
.LFB2304:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z64__device_stub__Z11phase_one_jPiS_PdS0_S0_S_P17curandStateXORWOWiPiS_PdS0_S0_S_P17curandStateXORWOWi
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2304:
.size _Z11phase_one_jPiS_PdS0_S0_S_P17curandStateXORWOWi, .-_Z11phase_one_jPiS_PdS0_S0_S_P17curandStateXORWOWi
.globl _Z59__device_stub__Z14phase_two_fillPdS_S_P17curandStateXORWOWiPdS_S_P17curandStateXORWOWi
.type _Z59__device_stub__Z14phase_two_fillPdS_S_P17curandStateXORWOWiPdS_S_P17curandStateXORWOWi, @function
_Z59__device_stub__Z14phase_two_fillPdS_S_P17curandStateXORWOWiPdS_S_P17curandStateXORWOWi:
.LFB2305:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L47
.L43:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L48
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L47:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 184
pushq 56(%rsp)
.cfi_def_cfa_offset 192
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z14phase_two_fillPdS_S_P17curandStateXORWOWi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L43
.L48:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2305:
.size _Z59__device_stub__Z14phase_two_fillPdS_S_P17curandStateXORWOWiPdS_S_P17curandStateXORWOWi, .-_Z59__device_stub__Z14phase_two_fillPdS_S_P17curandStateXORWOWiPdS_S_P17curandStateXORWOWi
.globl _Z14phase_two_fillPdS_S_P17curandStateXORWOWi
.type _Z14phase_two_fillPdS_S_P17curandStateXORWOWi, @function
_Z14phase_two_fillPdS_S_P17curandStateXORWOWi:
.LFB2306:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z59__device_stub__Z14phase_two_fillPdS_S_P17curandStateXORWOWiPdS_S_P17curandStateXORWOWi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2306:
.size _Z14phase_two_fillPdS_S_P17curandStateXORWOWi, .-_Z14phase_two_fillPdS_S_P17curandStateXORWOWi
.globl _Z59__device_stub__Z14phase_two_bulkPdS_S_P17curandStateXORWOWiPdS_S_P17curandStateXORWOWi
.type _Z59__device_stub__Z14phase_two_bulkPdS_S_P17curandStateXORWOWiPdS_S_P17curandStateXORWOWi, @function
_Z59__device_stub__Z14phase_two_bulkPdS_S_P17curandStateXORWOWiPdS_S_P17curandStateXORWOWi:
.LFB2307:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L55
.L51:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L56
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L55:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 184
pushq 56(%rsp)
.cfi_def_cfa_offset 192
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z14phase_two_bulkPdS_S_P17curandStateXORWOWi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L51
.L56:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2307:
.size _Z59__device_stub__Z14phase_two_bulkPdS_S_P17curandStateXORWOWiPdS_S_P17curandStateXORWOWi, .-_Z59__device_stub__Z14phase_two_bulkPdS_S_P17curandStateXORWOWiPdS_S_P17curandStateXORWOWi
.globl _Z14phase_two_bulkPdS_S_P17curandStateXORWOWi
.type _Z14phase_two_bulkPdS_S_P17curandStateXORWOWi, @function
_Z14phase_two_bulkPdS_S_P17curandStateXORWOWi:
.LFB2308:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z59__device_stub__Z14phase_two_bulkPdS_S_P17curandStateXORWOWiPdS_S_P17curandStateXORWOWi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2308:
.size _Z14phase_two_bulkPdS_S_P17curandStateXORWOWi, .-_Z14phase_two_bulkPdS_S_P17curandStateXORWOWi
.globl _Z58__device_stub__Z11phase_two_dPdS_PiS_P17curandStateXORWOWiPdS_PiS_P17curandStateXORWOWi
.type _Z58__device_stub__Z11phase_two_dPdS_PiS_P17curandStateXORWOWiPdS_PiS_P17curandStateXORWOWi, @function
_Z58__device_stub__Z11phase_two_dPdS_PiS_P17curandStateXORWOWiPdS_PiS_P17curandStateXORWOWi:
.LFB2309:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movq %r8, 8(%rsp)
movl %r9d, 4(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
leaq 4(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L63
.L59:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L64
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L63:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z11phase_two_dPdS_PiS_P17curandStateXORWOWi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L59
.L64:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2309:
.size _Z58__device_stub__Z11phase_two_dPdS_PiS_P17curandStateXORWOWiPdS_PiS_P17curandStateXORWOWi, .-_Z58__device_stub__Z11phase_two_dPdS_PiS_P17curandStateXORWOWiPdS_PiS_P17curandStateXORWOWi
.globl _Z11phase_two_dPdS_PiS_P17curandStateXORWOWi
.type _Z11phase_two_dPdS_PiS_P17curandStateXORWOWi, @function
_Z11phase_two_dPdS_PiS_P17curandStateXORWOWi:
.LFB2310:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z58__device_stub__Z11phase_two_dPdS_PiS_P17curandStateXORWOWiPdS_PiS_P17curandStateXORWOWi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2310:
.size _Z11phase_two_dPdS_PiS_P17curandStateXORWOWi, .-_Z11phase_two_dPdS_PiS_P17curandStateXORWOWi
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z11phase_two_dPdS_PiS_P17curandStateXORWOWi"
.align 8
.LC1:
.string "_Z14phase_two_bulkPdS_S_P17curandStateXORWOWi"
.align 8
.LC2:
.string "_Z14phase_two_fillPdS_S_P17curandStateXORWOWi"
.align 8
.LC3:
.string "_Z11phase_one_jPiS_PdS0_S0_S_P17curandStateXORWOWi"
.align 8
.LC4:
.string "_Z11phase_one_iPiPdS0_S0_S_P17curandStateXORWOWi"
.align 8
.LC5:
.string "_Z15phase_one_shiftPdS_S_PiP17curandStateXORWOWi"
.align 8
.LC6:
.string "_Z16get_random_arrayP17curandStateXORWOWiPd"
.align 8
.LC7:
.string "_Z19setup_random_kernelP17curandStateXORWOWii"
.section .rodata.str1.1,"aMS",@progbits,1
.LC8:
.string "precalc_xorwow_matrix"
.LC9:
.string "precalc_xorwow_offset_matrix"
.LC10:
.string "mrg32k3aM1"
.LC11:
.string "mrg32k3aM2"
.LC12:
.string "mrg32k3aM1SubSeq"
.LC13:
.string "mrg32k3aM2SubSeq"
.LC14:
.string "mrg32k3aM1Seq"
.LC15:
.string "mrg32k3aM2Seq"
.LC16:
.string "__cr_lgamma_table"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2312:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z11phase_two_dPdS_PiS_P17curandStateXORWOWi(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z14phase_two_bulkPdS_S_P17curandStateXORWOWi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z14phase_two_fillPdS_S_P17curandStateXORWOWi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z11phase_one_jPiS_PdS0_S0_S_P17curandStateXORWOWi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z11phase_one_iPiPdS0_S0_S_P17curandStateXORWOWi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z15phase_one_shiftPdS_S_PiP17curandStateXORWOWi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z16get_random_arrayP17curandStateXORWOWiPd(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _Z19setup_random_kernelP17curandStateXORWOWii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $102400, %r9d
movl $0, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _ZL21precalc_xorwow_matrix(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $102400, %r9d
movl $0, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq _ZL28precalc_xorwow_offset_matrix(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10mrg32k3aM1(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10mrg32k3aM2(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2016, %r9d
movl $0, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _ZL16mrg32k3aM1SubSeq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2016, %r9d
movl $0, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _ZL16mrg32k3aM2SubSeq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC14(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13mrg32k3aM1Seq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13mrg32k3aM2Seq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $72, %r9d
movl $0, %r8d
leaq .LC16(%rip), %rdx
movq %rdx, %rcx
leaq _ZL17__cr_lgamma_table(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2312:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL17__cr_lgamma_table
.comm _ZL17__cr_lgamma_table,72,32
.local _ZL13mrg32k3aM2Seq
.comm _ZL13mrg32k3aM2Seq,2304,32
.local _ZL13mrg32k3aM1Seq
.comm _ZL13mrg32k3aM1Seq,2304,32
.local _ZL16mrg32k3aM2SubSeq
.comm _ZL16mrg32k3aM2SubSeq,2016,32
.local _ZL16mrg32k3aM1SubSeq
.comm _ZL16mrg32k3aM1SubSeq,2016,32
.local _ZL10mrg32k3aM2
.comm _ZL10mrg32k3aM2,2304,32
.local _ZL10mrg32k3aM1
.comm _ZL10mrg32k3aM1,2304,32
.local _ZL28precalc_xorwow_offset_matrix
.comm _ZL28precalc_xorwow_offset_matrix,102400,32
.local _ZL21precalc_xorwow_matrix
.comm _ZL21precalc_xorwow_matrix,102400,32
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "PhasesKernel.hip"
.globl _Z34__device_stub__setup_random_kernelP12hiprandStateii # -- Begin function _Z34__device_stub__setup_random_kernelP12hiprandStateii
.p2align 4, 0x90
.type _Z34__device_stub__setup_random_kernelP12hiprandStateii,@function
_Z34__device_stub__setup_random_kernelP12hiprandStateii: # @_Z34__device_stub__setup_random_kernelP12hiprandStateii
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
movq %rsp, %rax
movq %rax, 80(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z19setup_random_kernelP12hiprandStateii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z34__device_stub__setup_random_kernelP12hiprandStateii, .Lfunc_end0-_Z34__device_stub__setup_random_kernelP12hiprandStateii
.cfi_endproc
# -- End function
.globl _Z31__device_stub__get_random_arrayP12hiprandStateiPd # -- Begin function _Z31__device_stub__get_random_arrayP12hiprandStateiPd
.p2align 4, 0x90
.type _Z31__device_stub__get_random_arrayP12hiprandStateiPd,@function
_Z31__device_stub__get_random_arrayP12hiprandStateiPd: # @_Z31__device_stub__get_random_arrayP12hiprandStateiPd
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movl %esi, 12(%rsp)
movq %rdx, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 12(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16get_random_arrayP12hiprandStateiPd, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z31__device_stub__get_random_arrayP12hiprandStateiPd, .Lfunc_end1-_Z31__device_stub__get_random_arrayP12hiprandStateiPd
.cfi_endproc
# -- End function
.globl _Z30__device_stub__phase_one_shiftPdS_S_PiP12hiprandStatei # -- Begin function _Z30__device_stub__phase_one_shiftPdS_S_PiP12hiprandStatei
.p2align 4, 0x90
.type _Z30__device_stub__phase_one_shiftPdS_S_PiP12hiprandStatei,@function
_Z30__device_stub__phase_one_shiftPdS_S_PiP12hiprandStatei: # @_Z30__device_stub__phase_one_shiftPdS_S_PiP12hiprandStatei
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movl %r9d, 4(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 4(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z15phase_one_shiftPdS_S_PiP12hiprandStatei, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end2:
.size _Z30__device_stub__phase_one_shiftPdS_S_PiP12hiprandStatei, .Lfunc_end2-_Z30__device_stub__phase_one_shiftPdS_S_PiP12hiprandStatei
.cfi_endproc
# -- End function
.globl _Z26__device_stub__phase_one_iPiPdS0_S0_S_P12hiprandStatei # -- Begin function _Z26__device_stub__phase_one_iPiPdS0_S0_S_P12hiprandStatei
.p2align 4, 0x90
.type _Z26__device_stub__phase_one_iPiPdS0_S0_S_P12hiprandStatei,@function
_Z26__device_stub__phase_one_iPiPdS0_S0_S_P12hiprandStatei: # @_Z26__device_stub__phase_one_iPiPdS0_S0_S_P12hiprandStatei
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movq %r9, 48(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z11phase_one_iPiPdS0_S0_S_P12hiprandStatei, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end3:
.size _Z26__device_stub__phase_one_iPiPdS0_S0_S_P12hiprandStatei, .Lfunc_end3-_Z26__device_stub__phase_one_iPiPdS0_S0_S_P12hiprandStatei
.cfi_endproc
# -- End function
.globl _Z26__device_stub__phase_one_jPiS_PdS0_S0_S_P12hiprandStatei # -- Begin function _Z26__device_stub__phase_one_jPiS_PdS0_S0_S_P12hiprandStatei
.p2align 4, 0x90
.type _Z26__device_stub__phase_one_jPiS_PdS0_S0_S_P12hiprandStatei,@function
_Z26__device_stub__phase_one_jPiS_PdS0_S0_S_P12hiprandStatei: # @_Z26__device_stub__phase_one_jPiS_PdS0_S0_S_P12hiprandStatei
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movq %r9, 48(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
leaq 184(%rsp), %rax
movq %rax, 152(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z11phase_one_jPiS_PdS0_S0_S_P12hiprandStatei, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end4:
.size _Z26__device_stub__phase_one_jPiS_PdS0_S0_S_P12hiprandStatei, .Lfunc_end4-_Z26__device_stub__phase_one_jPiS_PdS0_S0_S_P12hiprandStatei
.cfi_endproc
# -- End function
.globl _Z29__device_stub__phase_two_fillPdS_S_P12hiprandStatei # -- Begin function _Z29__device_stub__phase_two_fillPdS_S_P12hiprandStatei
.p2align 4, 0x90
.type _Z29__device_stub__phase_two_fillPdS_S_P12hiprandStatei,@function
_Z29__device_stub__phase_two_fillPdS_S_P12hiprandStatei: # @_Z29__device_stub__phase_two_fillPdS_S_P12hiprandStatei
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z14phase_two_fillPdS_S_P12hiprandStatei, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end5:
.size _Z29__device_stub__phase_two_fillPdS_S_P12hiprandStatei, .Lfunc_end5-_Z29__device_stub__phase_two_fillPdS_S_P12hiprandStatei
.cfi_endproc
# -- End function
.globl _Z29__device_stub__phase_two_bulkPdS_S_P12hiprandStatei # -- Begin function _Z29__device_stub__phase_two_bulkPdS_S_P12hiprandStatei
.p2align 4, 0x90
.type _Z29__device_stub__phase_two_bulkPdS_S_P12hiprandStatei,@function
_Z29__device_stub__phase_two_bulkPdS_S_P12hiprandStatei: # @_Z29__device_stub__phase_two_bulkPdS_S_P12hiprandStatei
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z14phase_two_bulkPdS_S_P12hiprandStatei, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end6:
.size _Z29__device_stub__phase_two_bulkPdS_S_P12hiprandStatei, .Lfunc_end6-_Z29__device_stub__phase_two_bulkPdS_S_P12hiprandStatei
.cfi_endproc
# -- End function
.globl _Z26__device_stub__phase_two_dPdS_PiS_P12hiprandStatei # -- Begin function _Z26__device_stub__phase_two_dPdS_PiS_P12hiprandStatei
.p2align 4, 0x90
.type _Z26__device_stub__phase_two_dPdS_PiS_P12hiprandStatei,@function
_Z26__device_stub__phase_two_dPdS_PiS_P12hiprandStatei: # @_Z26__device_stub__phase_two_dPdS_PiS_P12hiprandStatei
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movl %r9d, 4(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 4(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z11phase_two_dPdS_PiS_P12hiprandStatei, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end7:
.size _Z26__device_stub__phase_two_dPdS_PiS_P12hiprandStatei, .Lfunc_end7-_Z26__device_stub__phase_two_dPdS_PiS_P12hiprandStatei
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB8_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB8_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z19setup_random_kernelP12hiprandStateii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16get_random_arrayP12hiprandStateiPd, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z15phase_one_shiftPdS_S_PiP12hiprandStatei, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11phase_one_iPiPdS0_S0_S_P12hiprandStatei, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11phase_one_jPiS_PdS0_S0_S_P12hiprandStatei, %esi
movl $.L__unnamed_5, %edx
movl $.L__unnamed_5, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14phase_two_fillPdS_S_P12hiprandStatei, %esi
movl $.L__unnamed_6, %edx
movl $.L__unnamed_6, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14phase_two_bulkPdS_S_P12hiprandStatei, %esi
movl $.L__unnamed_7, %edx
movl $.L__unnamed_7, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11phase_two_dPdS_PiS_P12hiprandStatei, %esi
movl $.L__unnamed_8, %edx
movl $.L__unnamed_8, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end8:
.size __hip_module_ctor, .Lfunc_end8-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB9_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB9_2:
retq
.Lfunc_end9:
.size __hip_module_dtor, .Lfunc_end9-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z19setup_random_kernelP12hiprandStateii,@object # @_Z19setup_random_kernelP12hiprandStateii
.section .rodata,"a",@progbits
.globl _Z19setup_random_kernelP12hiprandStateii
.p2align 3, 0x0
_Z19setup_random_kernelP12hiprandStateii:
.quad _Z34__device_stub__setup_random_kernelP12hiprandStateii
.size _Z19setup_random_kernelP12hiprandStateii, 8
.type _Z16get_random_arrayP12hiprandStateiPd,@object # @_Z16get_random_arrayP12hiprandStateiPd
.globl _Z16get_random_arrayP12hiprandStateiPd
.p2align 3, 0x0
_Z16get_random_arrayP12hiprandStateiPd:
.quad _Z31__device_stub__get_random_arrayP12hiprandStateiPd
.size _Z16get_random_arrayP12hiprandStateiPd, 8
.type _Z15phase_one_shiftPdS_S_PiP12hiprandStatei,@object # @_Z15phase_one_shiftPdS_S_PiP12hiprandStatei
.globl _Z15phase_one_shiftPdS_S_PiP12hiprandStatei
.p2align 3, 0x0
_Z15phase_one_shiftPdS_S_PiP12hiprandStatei:
.quad _Z30__device_stub__phase_one_shiftPdS_S_PiP12hiprandStatei
.size _Z15phase_one_shiftPdS_S_PiP12hiprandStatei, 8
.type _Z11phase_one_iPiPdS0_S0_S_P12hiprandStatei,@object # @_Z11phase_one_iPiPdS0_S0_S_P12hiprandStatei
.globl _Z11phase_one_iPiPdS0_S0_S_P12hiprandStatei
.p2align 3, 0x0
_Z11phase_one_iPiPdS0_S0_S_P12hiprandStatei:
.quad _Z26__device_stub__phase_one_iPiPdS0_S0_S_P12hiprandStatei
.size _Z11phase_one_iPiPdS0_S0_S_P12hiprandStatei, 8
.type _Z11phase_one_jPiS_PdS0_S0_S_P12hiprandStatei,@object # @_Z11phase_one_jPiS_PdS0_S0_S_P12hiprandStatei
.globl _Z11phase_one_jPiS_PdS0_S0_S_P12hiprandStatei
.p2align 3, 0x0
_Z11phase_one_jPiS_PdS0_S0_S_P12hiprandStatei:
.quad _Z26__device_stub__phase_one_jPiS_PdS0_S0_S_P12hiprandStatei
.size _Z11phase_one_jPiS_PdS0_S0_S_P12hiprandStatei, 8
.type _Z14phase_two_fillPdS_S_P12hiprandStatei,@object # @_Z14phase_two_fillPdS_S_P12hiprandStatei
.globl _Z14phase_two_fillPdS_S_P12hiprandStatei
.p2align 3, 0x0
_Z14phase_two_fillPdS_S_P12hiprandStatei:
.quad _Z29__device_stub__phase_two_fillPdS_S_P12hiprandStatei
.size _Z14phase_two_fillPdS_S_P12hiprandStatei, 8
.type _Z14phase_two_bulkPdS_S_P12hiprandStatei,@object # @_Z14phase_two_bulkPdS_S_P12hiprandStatei
.globl _Z14phase_two_bulkPdS_S_P12hiprandStatei
.p2align 3, 0x0
_Z14phase_two_bulkPdS_S_P12hiprandStatei:
.quad _Z29__device_stub__phase_two_bulkPdS_S_P12hiprandStatei
.size _Z14phase_two_bulkPdS_S_P12hiprandStatei, 8
.type _Z11phase_two_dPdS_PiS_P12hiprandStatei,@object # @_Z11phase_two_dPdS_PiS_P12hiprandStatei
.globl _Z11phase_two_dPdS_PiS_P12hiprandStatei
.p2align 3, 0x0
_Z11phase_two_dPdS_PiS_P12hiprandStatei:
.quad _Z26__device_stub__phase_two_dPdS_PiS_P12hiprandStatei
.size _Z11phase_two_dPdS_PiS_P12hiprandStatei, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z19setup_random_kernelP12hiprandStateii"
.size .L__unnamed_1, 41
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z16get_random_arrayP12hiprandStateiPd"
.size .L__unnamed_2, 39
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z15phase_one_shiftPdS_S_PiP12hiprandStatei"
.size .L__unnamed_3, 44
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "_Z11phase_one_iPiPdS0_S0_S_P12hiprandStatei"
.size .L__unnamed_4, 44
.type .L__unnamed_5,@object # @4
.L__unnamed_5:
.asciz "_Z11phase_one_jPiS_PdS0_S0_S_P12hiprandStatei"
.size .L__unnamed_5, 46
.type .L__unnamed_6,@object # @5
.L__unnamed_6:
.asciz "_Z14phase_two_fillPdS_S_P12hiprandStatei"
.size .L__unnamed_6, 41
.type .L__unnamed_7,@object # @6
.L__unnamed_7:
.asciz "_Z14phase_two_bulkPdS_S_P12hiprandStatei"
.size .L__unnamed_7, 41
.type .L__unnamed_8,@object # @7
.L__unnamed_8:
.asciz "_Z11phase_two_dPdS_PiS_P12hiprandStatei"
.size .L__unnamed_8, 40
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z34__device_stub__setup_random_kernelP12hiprandStateii
.addrsig_sym _Z31__device_stub__get_random_arrayP12hiprandStateiPd
.addrsig_sym _Z30__device_stub__phase_one_shiftPdS_S_PiP12hiprandStatei
.addrsig_sym _Z26__device_stub__phase_one_iPiPdS0_S0_S_P12hiprandStatei
.addrsig_sym _Z26__device_stub__phase_one_jPiS_PdS0_S0_S_P12hiprandStatei
.addrsig_sym _Z29__device_stub__phase_two_fillPdS_S_P12hiprandStatei
.addrsig_sym _Z29__device_stub__phase_two_bulkPdS_S_P12hiprandStatei
.addrsig_sym _Z26__device_stub__phase_two_dPdS_PiS_P12hiprandStatei
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z19setup_random_kernelP12hiprandStateii
.addrsig_sym _Z16get_random_arrayP12hiprandStateiPd
.addrsig_sym _Z15phase_one_shiftPdS_S_PiP12hiprandStatei
.addrsig_sym _Z11phase_one_iPiPdS0_S0_S_P12hiprandStatei
.addrsig_sym _Z11phase_one_jPiS_PdS0_S0_S_P12hiprandStatei
.addrsig_sym _Z14phase_two_fillPdS_S_P12hiprandStatei
.addrsig_sym _Z14phase_two_bulkPdS_S_P12hiprandStatei
.addrsig_sym _Z11phase_two_dPdS_PiS_P12hiprandStatei
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: rows of output matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: subset of number of non-zeroes of input matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is dense and the output matrix is dense.
*
* @params in dense input pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param inClen number of columns of input matrix
* @param retRlen number of rows of output matrix
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Does a copy of upper to lower triangle of the given matrix
* @param ret the input and output array allocated on the GPU
* @param dim the number of rows of the square matrix ret
* @param N total number of elements of the matrix
*/
extern "C"
extern "C"
__global__ void slice_sparse_dense_row(double* inVal, int* inRowPtr, int* colInd, double* ret, int rl, int ru, int cl, int cu, int retClen) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int rowIndex = index + rl;
if (rowIndex <= ru){
/*
* TODO: Alternative approach: use dynamic parallelism. We are skipping this for now to avoid
* the complexity of two-step separate compilation and linking process.
*
* extern "C"
* __global__ void slice_sparse_dense_row_helper(double* inVal, int* inRowPtr, int* colInd, double* ret,
* int rl, int ru, int cl, int cu, int retClen, int start, int end, int index) {
* int i = blockIdx.x * blockDim.x + threadIdx.x + start;
* // Only slice if the index falls into the given range
* if(i < end && cl <= colInd[i] && colInd[i] <= cu) {
* ret[ index*retClen + (colInd[i] - cl) ] = inVal[i];
* }
* }
*
* int size = inRowPtr[rowIndex+1] - inRowPtr[rowIndex];
* double numThreads = (double)min(size, MAX_NUM_THREADS_CHILD_KERNEL);
* slice_sparse_dense_row_helper<<< ceil(numThreads/ MAX_NUM_THREADS_CHILD_KERNEL), MAX_NUM_THREADS_CHILD_KERNEL>>>(inVal, inRowPtr, colInd, ret,
* rl, ru, cl, cu, retClen, inRowPtr[rowIndex], inRowPtr[rowIndex+1], index);
*
* Two-step compilation and linking process in JCudaKernels's constructor:
* cuLinkAddFile(linkState, CUjitInputType.CU_JIT_INPUT_LIBRARY, "/usr/local/cuda/lib64/libcudadevrt.a", jitOptions);
*/
// Iterate over elements of the row 'rowIndex'.
for(int i = inRowPtr[rowIndex]; i < inRowPtr[rowIndex+1]; i++) {
// Only slice if the index falls into the given range
if(cl <= colInd[i] && colInd[i] <= cu) {
ret[ index*retClen + (colInd[i] - cl) ] = inVal[i];
}
}
}
} | code for sm_80
Function : slice_sparse_dense_row
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ IADD3 R2, R0, c[0x0][0x180], RZ ; /* 0x0000600000027a10 */
/* 0x000fc80007ffe0ff */
/*0050*/ ISETP.GT.AND P0, PT, R2, c[0x0][0x184], PT ; /* 0x0000610002007a0c */
/* 0x000fda0003f04270 */
/*0060*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0070*/ IMAD.MOV.U32 R7, RZ, RZ, 0x4 ; /* 0x00000004ff077424 */
/* 0x000fe200078e00ff */
/*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0090*/ IMAD.WIDE R2, R2, R7, c[0x0][0x168] ; /* 0x00005a0002027625 */
/* 0x000fca00078e0207 */
/*00a0*/ LDG.E R4, [R2.64] ; /* 0x0000000402047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R9, [R2.64+0x4] ; /* 0x0000040402097981 */
/* 0x000ea4000c1e1900 */
/*00c0*/ ISETP.GE.AND P0, PT, R4, R9, PT ; /* 0x000000090400720c */
/* 0x004fda0003f06270 */
/*00d0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00e0*/ MOV R10, R4 ; /* 0x00000004000a7202 */
/* 0x000fe20000000f00 */
/*00f0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x8 ; /* 0x00000008ff057424 */
/* 0x000fe200078e00ff */
/*0100*/ MOV R11, c[0x0][0x190] ; /* 0x00006400000b7a02 */
/* 0x000fc60000000f00 */
/*0110*/ IMAD.WIDE R4, R10, R5, c[0x0][0x160] ; /* 0x000058000a047625 */
/* 0x000fc800078e0205 */
/*0120*/ IMAD.WIDE R6, R10, R7, c[0x0][0x170] ; /* 0x00005c000a067625 */
/* 0x000fe200078e0207 */
/*0130*/ MOV R15, R5 ; /* 0x00000005000f7202 */
/* 0x000fc60000000f00 */
/*0140*/ IMAD R0, R0, R11, -c[0x0][0x188] ; /* 0x8000620000007624 */
/* 0x000fe200078e020b */
/*0150*/ MOV R13, R7 ; /* 0x00000007000d7202 */
/* 0x000fe20000000f00 */
/*0160*/ IMAD.MOV.U32 R12, RZ, RZ, R4 ; /* 0x000000ffff0c7224 */
/* 0x000fe400078e0004 */
/*0170*/ IMAD.MOV.U32 R8, RZ, RZ, R6 ; /* 0x000000ffff087224 */
/* 0x000fe400078e0006 */
/*0180*/ IMAD.MOV.U32 R11, RZ, RZ, R9 ; /* 0x000000ffff0b7224 */
/* 0x000fe400078e0009 */
/*0190*/ MOV R9, R13 ; /* 0x0000000d00097202 */
/* 0x000fca0000000f00 */
/*01a0*/ LDG.E R7, [R8.64] ; /* 0x0000000408077981 */
/* 0x000ea4000c1e1900 */
/*01b0*/ ISETP.GT.AND P0, PT, R7, c[0x0][0x18c], PT ; /* 0x0000630007007a0c */
/* 0x004fc80003f04270 */
/*01c0*/ ISETP.LT.OR P0, PT, R7, c[0x0][0x188], P0 ; /* 0x0000620007007a0c */
/* 0x000fda0000701670 */
/*01d0*/ @!P0 IMAD.MOV.U32 R4, RZ, RZ, R12 ; /* 0x000000ffff048224 */
/* 0x000fe200078e000c */
/*01e0*/ @!P0 MOV R5, R15 ; /* 0x0000000f00058202 */
/* 0x000fcc0000000f00 */
/*01f0*/ @!P0 LDG.E.64 R4, [R4.64] ; /* 0x0000000404048981 */
/* 0x000ea2000c1e1b00 */
/*0200*/ @!P0 IMAD.IADD R6, R0, 0x1, R7 ; /* 0x0000000100068824 */
/* 0x000fe200078e0207 */
/*0210*/ @!P0 MOV R7, 0x8 ; /* 0x0000000800078802 */
/* 0x000fca0000000f00 */
/*0220*/ @!P0 IMAD.WIDE R6, R6, R7, c[0x0][0x178] ; /* 0x00005e0006068625 */
/* 0x000fca00078e0207 */
/*0230*/ @!P0 STG.E.64 [R6.64], R4 ; /* 0x0000000406008986 */
/* 0x0041e8000c101b04 */
/*0240*/ @!P0 LDG.E R11, [R2.64+0x4] ; /* 0x00000404020b8981 */
/* 0x000ea2000c1e1900 */
/*0250*/ IADD3 R10, R10, 0x1, RZ ; /* 0x000000010a0a7810 */
/* 0x000fe40007ffe0ff */
/*0260*/ IADD3 R8, P2, R8, 0x4, RZ ; /* 0x0000000408087810 */
/* 0x000fe40007f5e0ff */
/*0270*/ IADD3 R12, P1, R12, 0x8, RZ ; /* 0x000000080c0c7810 */
/* 0x000fc60007f3e0ff */
/*0280*/ IMAD.X R13, RZ, RZ, R13, P2 ; /* 0x000000ffff0d7224 */
/* 0x000fe200010e060d */
/*0290*/ IADD3.X R15, RZ, R15, RZ, P1, !PT ; /* 0x0000000fff0f7210 */
/* 0x000fe40000ffe4ff */
/*02a0*/ ISETP.GE.AND P0, PT, R10, R11, PT ; /* 0x0000000b0a00720c */
/* 0x004fda0003f06270 */
/*02b0*/ @!P0 BRA 0x190 ; /* 0xfffffed000008947 */
/* 0x001fea000383ffff */
/*02c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*02d0*/ BRA 0x2d0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0300*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0310*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0320*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0330*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0340*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0350*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: rows of output matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: subset of number of non-zeroes of input matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is dense and the output matrix is dense.
*
* @params in dense input pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param inClen number of columns of input matrix
* @param retRlen number of rows of output matrix
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Does a copy of upper to lower triangle of the given matrix
* @param ret the input and output array allocated on the GPU
* @param dim the number of rows of the square matrix ret
* @param N total number of elements of the matrix
*/
extern "C"
extern "C"
__global__ void slice_sparse_dense_row(double* inVal, int* inRowPtr, int* colInd, double* ret, int rl, int ru, int cl, int cu, int retClen) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int rowIndex = index + rl;
if (rowIndex <= ru){
/*
* TODO: Alternative approach: use dynamic parallelism. We are skipping this for now to avoid
* the complexity of two-step separate compilation and linking process.
*
* extern "C"
* __global__ void slice_sparse_dense_row_helper(double* inVal, int* inRowPtr, int* colInd, double* ret,
* int rl, int ru, int cl, int cu, int retClen, int start, int end, int index) {
* int i = blockIdx.x * blockDim.x + threadIdx.x + start;
* // Only slice if the index falls into the given range
* if(i < end && cl <= colInd[i] && colInd[i] <= cu) {
* ret[ index*retClen + (colInd[i] - cl) ] = inVal[i];
* }
* }
*
* int size = inRowPtr[rowIndex+1] - inRowPtr[rowIndex];
* double numThreads = (double)min(size, MAX_NUM_THREADS_CHILD_KERNEL);
* slice_sparse_dense_row_helper<<< ceil(numThreads/ MAX_NUM_THREADS_CHILD_KERNEL), MAX_NUM_THREADS_CHILD_KERNEL>>>(inVal, inRowPtr, colInd, ret,
* rl, ru, cl, cu, retClen, inRowPtr[rowIndex], inRowPtr[rowIndex+1], index);
*
* Two-step compilation and linking process in JCudaKernels's constructor:
* cuLinkAddFile(linkState, CUjitInputType.CU_JIT_INPUT_LIBRARY, "/usr/local/cuda/lib64/libcudadevrt.a", jitOptions);
*/
// Iterate over elements of the row 'rowIndex'.
for(int i = inRowPtr[rowIndex]; i < inRowPtr[rowIndex+1]; i++) {
// Only slice if the index falls into the given range
if(cl <= colInd[i] && colInd[i] <= cu) {
ret[ index*retClen + (colInd[i] - cl) ] = inVal[i];
}
}
}
} | .file "tmpxft_00053c34_00000000-6_slice_sparse_dense_row.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z54__device_stub__Z22slice_sparse_dense_rowPdPiS0_S_iiiiiPdPiS0_S_iiiii
.type _Z54__device_stub__Z22slice_sparse_dense_rowPdPiS0_S_iiiiiPdPiS0_S_iiiii, @function
_Z54__device_stub__Z22slice_sparse_dense_rowPdPiS0_S_iiiiiPdPiS0_S_iiiii:
.LFB2051:
.cfi_startproc
endbr64
subq $200, %rsp
.cfi_def_cfa_offset 208
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 216(%rsp), %rax
movq %rax, 168(%rsp)
leaq 224(%rsp), %rax
movq %rax, 176(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq slice_sparse_dense_row(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z54__device_stub__Z22slice_sparse_dense_rowPdPiS0_S_iiiiiPdPiS0_S_iiiii, .-_Z54__device_stub__Z22slice_sparse_dense_rowPdPiS0_S_iiiiiPdPiS0_S_iiiii
.globl slice_sparse_dense_row
.type slice_sparse_dense_row, @function
slice_sparse_dense_row:
.LFB2052:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 48
call _Z54__device_stub__Z22slice_sparse_dense_rowPdPiS0_S_iiiiiPdPiS0_S_iiiii
addq $40, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size slice_sparse_dense_row, .-slice_sparse_dense_row
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "slice_sparse_dense_row"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq slice_sparse_dense_row(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: rows of output matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: subset of number of non-zeroes of input matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is dense and the output matrix is dense.
*
* @params in dense input pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param inClen number of columns of input matrix
* @param retRlen number of rows of output matrix
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Does a copy of upper to lower triangle of the given matrix
* @param ret the input and output array allocated on the GPU
* @param dim the number of rows of the square matrix ret
* @param N total number of elements of the matrix
*/
extern "C"
extern "C"
__global__ void slice_sparse_dense_row(double* inVal, int* inRowPtr, int* colInd, double* ret, int rl, int ru, int cl, int cu, int retClen) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int rowIndex = index + rl;
if (rowIndex <= ru){
/*
* TODO: Alternative approach: use dynamic parallelism. We are skipping this for now to avoid
* the complexity of two-step separate compilation and linking process.
*
* extern "C"
* __global__ void slice_sparse_dense_row_helper(double* inVal, int* inRowPtr, int* colInd, double* ret,
* int rl, int ru, int cl, int cu, int retClen, int start, int end, int index) {
* int i = blockIdx.x * blockDim.x + threadIdx.x + start;
* // Only slice if the index falls into the given range
* if(i < end && cl <= colInd[i] && colInd[i] <= cu) {
* ret[ index*retClen + (colInd[i] - cl) ] = inVal[i];
* }
* }
*
* int size = inRowPtr[rowIndex+1] - inRowPtr[rowIndex];
* double numThreads = (double)min(size, MAX_NUM_THREADS_CHILD_KERNEL);
* slice_sparse_dense_row_helper<<< ceil(numThreads/ MAX_NUM_THREADS_CHILD_KERNEL), MAX_NUM_THREADS_CHILD_KERNEL>>>(inVal, inRowPtr, colInd, ret,
* rl, ru, cl, cu, retClen, inRowPtr[rowIndex], inRowPtr[rowIndex+1], index);
*
* Two-step compilation and linking process in JCudaKernels's constructor:
* cuLinkAddFile(linkState, CUjitInputType.CU_JIT_INPUT_LIBRARY, "/usr/local/cuda/lib64/libcudadevrt.a", jitOptions);
*/
// Iterate over elements of the row 'rowIndex'.
for(int i = inRowPtr[rowIndex]; i < inRowPtr[rowIndex+1]; i++) {
// Only slice if the index falls into the given range
if(cl <= colInd[i] && colInd[i] <= cu) {
ret[ index*retClen + (colInd[i] - cl) ] = inVal[i];
}
}
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: rows of output matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: subset of number of non-zeroes of input matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is dense and the output matrix is dense.
*
* @params in dense input pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param inClen number of columns of input matrix
* @param retRlen number of rows of output matrix
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Does a copy of upper to lower triangle of the given matrix
* @param ret the input and output array allocated on the GPU
* @param dim the number of rows of the square matrix ret
* @param N total number of elements of the matrix
*/
extern "C"
extern "C"
__global__ void slice_sparse_dense_row(double* inVal, int* inRowPtr, int* colInd, double* ret, int rl, int ru, int cl, int cu, int retClen) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int rowIndex = index + rl;
if (rowIndex <= ru){
/*
* TODO: Alternative approach: use dynamic parallelism. We are skipping this for now to avoid
* the complexity of two-step separate compilation and linking process.
*
* extern "C"
* __global__ void slice_sparse_dense_row_helper(double* inVal, int* inRowPtr, int* colInd, double* ret,
* int rl, int ru, int cl, int cu, int retClen, int start, int end, int index) {
* int i = blockIdx.x * blockDim.x + threadIdx.x + start;
* // Only slice if the index falls into the given range
* if(i < end && cl <= colInd[i] && colInd[i] <= cu) {
* ret[ index*retClen + (colInd[i] - cl) ] = inVal[i];
* }
* }
*
* int size = inRowPtr[rowIndex+1] - inRowPtr[rowIndex];
* double numThreads = (double)min(size, MAX_NUM_THREADS_CHILD_KERNEL);
* slice_sparse_dense_row_helper<<< ceil(numThreads/ MAX_NUM_THREADS_CHILD_KERNEL), MAX_NUM_THREADS_CHILD_KERNEL>>>(inVal, inRowPtr, colInd, ret,
* rl, ru, cl, cu, retClen, inRowPtr[rowIndex], inRowPtr[rowIndex+1], index);
*
* Two-step compilation and linking process in JCudaKernels's constructor:
* cuLinkAddFile(linkState, CUjitInputType.CU_JIT_INPUT_LIBRARY, "/usr/local/cuda/lib64/libcudadevrt.a", jitOptions);
*/
// Iterate over elements of the row 'rowIndex'.
for(int i = inRowPtr[rowIndex]; i < inRowPtr[rowIndex+1]; i++) {
// Only slice if the index falls into the given range
if(cl <= colInd[i] && colInd[i] <= cu) {
ret[ index*retClen + (colInd[i] - cl) ] = inVal[i];
}
}
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: rows of output matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: subset of number of non-zeroes of input matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is dense and the output matrix is dense.
*
* @params in dense input pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param inClen number of columns of input matrix
* @param retRlen number of rows of output matrix
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Does a copy of upper to lower triangle of the given matrix
* @param ret the input and output array allocated on the GPU
* @param dim the number of rows of the square matrix ret
* @param N total number of elements of the matrix
*/
extern "C"
extern "C"
__global__ void slice_sparse_dense_row(double* inVal, int* inRowPtr, int* colInd, double* ret, int rl, int ru, int cl, int cu, int retClen) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int rowIndex = index + rl;
if (rowIndex <= ru){
/*
* TODO: Alternative approach: use dynamic parallelism. We are skipping this for now to avoid
* the complexity of two-step separate compilation and linking process.
*
* extern "C"
* __global__ void slice_sparse_dense_row_helper(double* inVal, int* inRowPtr, int* colInd, double* ret,
* int rl, int ru, int cl, int cu, int retClen, int start, int end, int index) {
* int i = blockIdx.x * blockDim.x + threadIdx.x + start;
* // Only slice if the index falls into the given range
* if(i < end && cl <= colInd[i] && colInd[i] <= cu) {
* ret[ index*retClen + (colInd[i] - cl) ] = inVal[i];
* }
* }
*
* int size = inRowPtr[rowIndex+1] - inRowPtr[rowIndex];
* double numThreads = (double)min(size, MAX_NUM_THREADS_CHILD_KERNEL);
* slice_sparse_dense_row_helper<<< ceil(numThreads/ MAX_NUM_THREADS_CHILD_KERNEL), MAX_NUM_THREADS_CHILD_KERNEL>>>(inVal, inRowPtr, colInd, ret,
* rl, ru, cl, cu, retClen, inRowPtr[rowIndex], inRowPtr[rowIndex+1], index);
*
* Two-step compilation and linking process in JCudaKernels's constructor:
* cuLinkAddFile(linkState, CUjitInputType.CU_JIT_INPUT_LIBRARY, "/usr/local/cuda/lib64/libcudadevrt.a", jitOptions);
*/
// Iterate over elements of the row 'rowIndex'.
for(int i = inRowPtr[rowIndex]; i < inRowPtr[rowIndex+1]; i++) {
// Only slice if the index falls into the given range
if(cl <= colInd[i] && colInd[i] <= cu) {
ret[ index*retClen + (colInd[i] - cl) ] = inVal[i];
}
}
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected slice_sparse_dense_row
.globl slice_sparse_dense_row
.p2align 8
.type slice_sparse_dense_row,@function
slice_sparse_dense_row:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x44
s_load_b64 s[2:3], s[0:1], 0x20
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, s15, s4, v[0:1]
v_add_nc_u32_e32 v0, s2, v2
s_mov_b32 s2, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_ge_i32_e64 s3, v0
s_cbranch_execz .LBB0_6
s_load_b64 s[2:3], s[0:1], 0x8
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_load_b64 v[0:1], v[0:1], off
s_waitcnt vmcnt(0)
v_cmp_lt_i32_e32 vcc_lo, v0, v1
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_6
s_clause 0x3
s_load_b32 s8, s[0:1], 0x30
s_load_b128 s[4:7], s[0:1], 0x10
s_load_b64 s[2:3], s[0:1], 0x28
s_load_b64 s[0:1], s[0:1], 0x0
v_ashrrev_i32_e32 v4, 31, v0
v_mov_b32_e32 v3, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
v_lshlrev_b64 v[5:6], 2, v[3:4]
v_lshlrev_b64 v[7:8], 3, v[3:4]
s_waitcnt lgkmcnt(0)
v_mul_lo_u32 v9, v2, s8
v_add_co_u32 v2, vcc_lo, s4, v5
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v6, vcc_lo
v_add_co_u32 v4, vcc_lo, s0, v7
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v8, vcc_lo
v_subrev_nc_u32_e32 v6, s2, v9
s_mov_b32 s1, 0
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_4
.p2align 6
.LBB0_3:
s_or_b32 exec_lo, exec_lo, s0
v_add_nc_u32_e32 v0, 1, v0
v_add_co_u32 v2, vcc_lo, v2, 4
v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cmp_ge_i32_e32 vcc_lo, v0, v1
v_add_co_u32 v4, s0, v4, 8
v_add_co_ci_u32_e64 v5, s0, 0, v5, s0
s_or_b32 s1, vcc_lo, s1
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s1
s_cbranch_execz .LBB0_6
.LBB0_4:
global_load_b32 v7, v[2:3], off
s_waitcnt vmcnt(0)
v_cmp_le_i32_e32 vcc_lo, s2, v7
v_cmp_ge_i32_e64 s0, s3, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s4, vcc_lo, s0
s_and_saveexec_b32 s0, s4
s_cbranch_execz .LBB0_3
global_load_b64 v[8:9], v[4:5], off
v_add_nc_u32_e32 v10, v6, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v11, 31, v10
v_lshlrev_b64 v[10:11], 3, v[10:11]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v10, vcc_lo, s6, v10
v_add_co_ci_u32_e32 v11, vcc_lo, s7, v11, vcc_lo
s_waitcnt vmcnt(0)
global_store_b64 v[10:11], v[8:9], off
s_branch .LBB0_3
.LBB0_6:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel slice_sparse_dense_row
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 312
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 12
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size slice_sparse_dense_row, .Lfunc_end0-slice_sparse_dense_row
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: by_value
- .offset: 44
.size: 4
.value_kind: by_value
- .offset: 48
.size: 4
.value_kind: by_value
- .offset: 56
.size: 4
.value_kind: hidden_block_count_x
- .offset: 60
.size: 4
.value_kind: hidden_block_count_y
- .offset: 64
.size: 4
.value_kind: hidden_block_count_z
- .offset: 68
.size: 2
.value_kind: hidden_group_size_x
- .offset: 70
.size: 2
.value_kind: hidden_group_size_y
- .offset: 72
.size: 2
.value_kind: hidden_group_size_z
- .offset: 74
.size: 2
.value_kind: hidden_remainder_x
- .offset: 76
.size: 2
.value_kind: hidden_remainder_y
- .offset: 78
.size: 2
.value_kind: hidden_remainder_z
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 112
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 120
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 312
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: slice_sparse_dense_row
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: slice_sparse_dense_row.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 12
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: rows of output matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: subset of number of non-zeroes of input matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is dense and the output matrix is dense.
*
* @params in dense input pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param inClen number of columns of input matrix
* @param retRlen number of rows of output matrix
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Does a copy of upper to lower triangle of the given matrix
* @param ret the input and output array allocated on the GPU
* @param dim the number of rows of the square matrix ret
* @param N total number of elements of the matrix
*/
extern "C"
extern "C"
__global__ void slice_sparse_dense_row(double* inVal, int* inRowPtr, int* colInd, double* ret, int rl, int ru, int cl, int cu, int retClen) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int rowIndex = index + rl;
if (rowIndex <= ru){
/*
* TODO: Alternative approach: use dynamic parallelism. We are skipping this for now to avoid
* the complexity of two-step separate compilation and linking process.
*
* extern "C"
* __global__ void slice_sparse_dense_row_helper(double* inVal, int* inRowPtr, int* colInd, double* ret,
* int rl, int ru, int cl, int cu, int retClen, int start, int end, int index) {
* int i = blockIdx.x * blockDim.x + threadIdx.x + start;
* // Only slice if the index falls into the given range
* if(i < end && cl <= colInd[i] && colInd[i] <= cu) {
* ret[ index*retClen + (colInd[i] - cl) ] = inVal[i];
* }
* }
*
* int size = inRowPtr[rowIndex+1] - inRowPtr[rowIndex];
* double numThreads = (double)min(size, MAX_NUM_THREADS_CHILD_KERNEL);
* slice_sparse_dense_row_helper<<< ceil(numThreads/ MAX_NUM_THREADS_CHILD_KERNEL), MAX_NUM_THREADS_CHILD_KERNEL>>>(inVal, inRowPtr, colInd, ret,
* rl, ru, cl, cu, retClen, inRowPtr[rowIndex], inRowPtr[rowIndex+1], index);
*
* Two-step compilation and linking process in JCudaKernels's constructor:
* cuLinkAddFile(linkState, CUjitInputType.CU_JIT_INPUT_LIBRARY, "/usr/local/cuda/lib64/libcudadevrt.a", jitOptions);
*/
// Iterate over elements of the row 'rowIndex'.
for(int i = inRowPtr[rowIndex]; i < inRowPtr[rowIndex+1]; i++) {
// Only slice if the index falls into the given range
if(cl <= colInd[i] && colInd[i] <= cu) {
ret[ index*retClen + (colInd[i] - cl) ] = inVal[i];
}
}
}
} | .text
.file "slice_sparse_dense_row.hip"
.globl __device_stub__slice_sparse_dense_row # -- Begin function __device_stub__slice_sparse_dense_row
.p2align 4, 0x90
.type __device_stub__slice_sparse_dense_row,@function
__device_stub__slice_sparse_dense_row: # @__device_stub__slice_sparse_dense_row
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
leaq 184(%rsp), %rax
movq %rax, 152(%rsp)
leaq 192(%rsp), %rax
movq %rax, 160(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $slice_sparse_dense_row, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end0:
.size __device_stub__slice_sparse_dense_row, .Lfunc_end0-__device_stub__slice_sparse_dense_row
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $slice_sparse_dense_row, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type slice_sparse_dense_row,@object # @slice_sparse_dense_row
.section .rodata,"a",@progbits
.globl slice_sparse_dense_row
.p2align 3, 0x0
slice_sparse_dense_row:
.quad __device_stub__slice_sparse_dense_row
.size slice_sparse_dense_row, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "slice_sparse_dense_row"
.size .L__unnamed_1, 23
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__slice_sparse_dense_row
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym slice_sparse_dense_row
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : slice_sparse_dense_row
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ IADD3 R2, R0, c[0x0][0x180], RZ ; /* 0x0000600000027a10 */
/* 0x000fc80007ffe0ff */
/*0050*/ ISETP.GT.AND P0, PT, R2, c[0x0][0x184], PT ; /* 0x0000610002007a0c */
/* 0x000fda0003f04270 */
/*0060*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0070*/ IMAD.MOV.U32 R7, RZ, RZ, 0x4 ; /* 0x00000004ff077424 */
/* 0x000fe200078e00ff */
/*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0090*/ IMAD.WIDE R2, R2, R7, c[0x0][0x168] ; /* 0x00005a0002027625 */
/* 0x000fca00078e0207 */
/*00a0*/ LDG.E R4, [R2.64] ; /* 0x0000000402047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R9, [R2.64+0x4] ; /* 0x0000040402097981 */
/* 0x000ea4000c1e1900 */
/*00c0*/ ISETP.GE.AND P0, PT, R4, R9, PT ; /* 0x000000090400720c */
/* 0x004fda0003f06270 */
/*00d0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00e0*/ MOV R10, R4 ; /* 0x00000004000a7202 */
/* 0x000fe20000000f00 */
/*00f0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x8 ; /* 0x00000008ff057424 */
/* 0x000fe200078e00ff */
/*0100*/ MOV R11, c[0x0][0x190] ; /* 0x00006400000b7a02 */
/* 0x000fc60000000f00 */
/*0110*/ IMAD.WIDE R4, R10, R5, c[0x0][0x160] ; /* 0x000058000a047625 */
/* 0x000fc800078e0205 */
/*0120*/ IMAD.WIDE R6, R10, R7, c[0x0][0x170] ; /* 0x00005c000a067625 */
/* 0x000fe200078e0207 */
/*0130*/ MOV R15, R5 ; /* 0x00000005000f7202 */
/* 0x000fc60000000f00 */
/*0140*/ IMAD R0, R0, R11, -c[0x0][0x188] ; /* 0x8000620000007624 */
/* 0x000fe200078e020b */
/*0150*/ MOV R13, R7 ; /* 0x00000007000d7202 */
/* 0x000fe20000000f00 */
/*0160*/ IMAD.MOV.U32 R12, RZ, RZ, R4 ; /* 0x000000ffff0c7224 */
/* 0x000fe400078e0004 */
/*0170*/ IMAD.MOV.U32 R8, RZ, RZ, R6 ; /* 0x000000ffff087224 */
/* 0x000fe400078e0006 */
/*0180*/ IMAD.MOV.U32 R11, RZ, RZ, R9 ; /* 0x000000ffff0b7224 */
/* 0x000fe400078e0009 */
/*0190*/ MOV R9, R13 ; /* 0x0000000d00097202 */
/* 0x000fca0000000f00 */
/*01a0*/ LDG.E R7, [R8.64] ; /* 0x0000000408077981 */
/* 0x000ea4000c1e1900 */
/*01b0*/ ISETP.GT.AND P0, PT, R7, c[0x0][0x18c], PT ; /* 0x0000630007007a0c */
/* 0x004fc80003f04270 */
/*01c0*/ ISETP.LT.OR P0, PT, R7, c[0x0][0x188], P0 ; /* 0x0000620007007a0c */
/* 0x000fda0000701670 */
/*01d0*/ @!P0 IMAD.MOV.U32 R4, RZ, RZ, R12 ; /* 0x000000ffff048224 */
/* 0x000fe200078e000c */
/*01e0*/ @!P0 MOV R5, R15 ; /* 0x0000000f00058202 */
/* 0x000fcc0000000f00 */
/*01f0*/ @!P0 LDG.E.64 R4, [R4.64] ; /* 0x0000000404048981 */
/* 0x000ea2000c1e1b00 */
/*0200*/ @!P0 IMAD.IADD R6, R0, 0x1, R7 ; /* 0x0000000100068824 */
/* 0x000fe200078e0207 */
/*0210*/ @!P0 MOV R7, 0x8 ; /* 0x0000000800078802 */
/* 0x000fca0000000f00 */
/*0220*/ @!P0 IMAD.WIDE R6, R6, R7, c[0x0][0x178] ; /* 0x00005e0006068625 */
/* 0x000fca00078e0207 */
/*0230*/ @!P0 STG.E.64 [R6.64], R4 ; /* 0x0000000406008986 */
/* 0x0041e8000c101b04 */
/*0240*/ @!P0 LDG.E R11, [R2.64+0x4] ; /* 0x00000404020b8981 */
/* 0x000ea2000c1e1900 */
/*0250*/ IADD3 R10, R10, 0x1, RZ ; /* 0x000000010a0a7810 */
/* 0x000fe40007ffe0ff */
/*0260*/ IADD3 R8, P2, R8, 0x4, RZ ; /* 0x0000000408087810 */
/* 0x000fe40007f5e0ff */
/*0270*/ IADD3 R12, P1, R12, 0x8, RZ ; /* 0x000000080c0c7810 */
/* 0x000fc60007f3e0ff */
/*0280*/ IMAD.X R13, RZ, RZ, R13, P2 ; /* 0x000000ffff0d7224 */
/* 0x000fe200010e060d */
/*0290*/ IADD3.X R15, RZ, R15, RZ, P1, !PT ; /* 0x0000000fff0f7210 */
/* 0x000fe40000ffe4ff */
/*02a0*/ ISETP.GE.AND P0, PT, R10, R11, PT ; /* 0x0000000b0a00720c */
/* 0x004fda0003f06270 */
/*02b0*/ @!P0 BRA 0x190 ; /* 0xfffffed000008947 */
/* 0x001fea000383ffff */
/*02c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*02d0*/ BRA 0x2d0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0300*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0310*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0320*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0330*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0340*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0350*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected slice_sparse_dense_row
.globl slice_sparse_dense_row
.p2align 8
.type slice_sparse_dense_row,@function
slice_sparse_dense_row:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x44
s_load_b64 s[2:3], s[0:1], 0x20
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, s15, s4, v[0:1]
v_add_nc_u32_e32 v0, s2, v2
s_mov_b32 s2, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_ge_i32_e64 s3, v0
s_cbranch_execz .LBB0_6
s_load_b64 s[2:3], s[0:1], 0x8
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_load_b64 v[0:1], v[0:1], off
s_waitcnt vmcnt(0)
v_cmp_lt_i32_e32 vcc_lo, v0, v1
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_6
s_clause 0x3
s_load_b32 s8, s[0:1], 0x30
s_load_b128 s[4:7], s[0:1], 0x10
s_load_b64 s[2:3], s[0:1], 0x28
s_load_b64 s[0:1], s[0:1], 0x0
v_ashrrev_i32_e32 v4, 31, v0
v_mov_b32_e32 v3, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
v_lshlrev_b64 v[5:6], 2, v[3:4]
v_lshlrev_b64 v[7:8], 3, v[3:4]
s_waitcnt lgkmcnt(0)
v_mul_lo_u32 v9, v2, s8
v_add_co_u32 v2, vcc_lo, s4, v5
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v6, vcc_lo
v_add_co_u32 v4, vcc_lo, s0, v7
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v8, vcc_lo
v_subrev_nc_u32_e32 v6, s2, v9
s_mov_b32 s1, 0
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_4
.p2align 6
.LBB0_3:
s_or_b32 exec_lo, exec_lo, s0
v_add_nc_u32_e32 v0, 1, v0
v_add_co_u32 v2, vcc_lo, v2, 4
v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cmp_ge_i32_e32 vcc_lo, v0, v1
v_add_co_u32 v4, s0, v4, 8
v_add_co_ci_u32_e64 v5, s0, 0, v5, s0
s_or_b32 s1, vcc_lo, s1
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s1
s_cbranch_execz .LBB0_6
.LBB0_4:
global_load_b32 v7, v[2:3], off
s_waitcnt vmcnt(0)
v_cmp_le_i32_e32 vcc_lo, s2, v7
v_cmp_ge_i32_e64 s0, s3, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s4, vcc_lo, s0
s_and_saveexec_b32 s0, s4
s_cbranch_execz .LBB0_3
global_load_b64 v[8:9], v[4:5], off
v_add_nc_u32_e32 v10, v6, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v11, 31, v10
v_lshlrev_b64 v[10:11], 3, v[10:11]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v10, vcc_lo, s6, v10
v_add_co_ci_u32_e32 v11, vcc_lo, s7, v11, vcc_lo
s_waitcnt vmcnt(0)
global_store_b64 v[10:11], v[8:9], off
s_branch .LBB0_3
.LBB0_6:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel slice_sparse_dense_row
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 312
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 12
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size slice_sparse_dense_row, .Lfunc_end0-slice_sparse_dense_row
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: by_value
- .offset: 44
.size: 4
.value_kind: by_value
- .offset: 48
.size: 4
.value_kind: by_value
- .offset: 56
.size: 4
.value_kind: hidden_block_count_x
- .offset: 60
.size: 4
.value_kind: hidden_block_count_y
- .offset: 64
.size: 4
.value_kind: hidden_block_count_z
- .offset: 68
.size: 2
.value_kind: hidden_group_size_x
- .offset: 70
.size: 2
.value_kind: hidden_group_size_y
- .offset: 72
.size: 2
.value_kind: hidden_group_size_z
- .offset: 74
.size: 2
.value_kind: hidden_remainder_x
- .offset: 76
.size: 2
.value_kind: hidden_remainder_y
- .offset: 78
.size: 2
.value_kind: hidden_remainder_z
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 112
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 120
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 312
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: slice_sparse_dense_row
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: slice_sparse_dense_row.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 12
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00053c34_00000000-6_slice_sparse_dense_row.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z54__device_stub__Z22slice_sparse_dense_rowPdPiS0_S_iiiiiPdPiS0_S_iiiii
.type _Z54__device_stub__Z22slice_sparse_dense_rowPdPiS0_S_iiiiiPdPiS0_S_iiiii, @function
_Z54__device_stub__Z22slice_sparse_dense_rowPdPiS0_S_iiiiiPdPiS0_S_iiiii:
.LFB2051:
.cfi_startproc
endbr64
subq $200, %rsp
.cfi_def_cfa_offset 208
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 216(%rsp), %rax
movq %rax, 168(%rsp)
leaq 224(%rsp), %rax
movq %rax, 176(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq slice_sparse_dense_row(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z54__device_stub__Z22slice_sparse_dense_rowPdPiS0_S_iiiiiPdPiS0_S_iiiii, .-_Z54__device_stub__Z22slice_sparse_dense_rowPdPiS0_S_iiiiiPdPiS0_S_iiiii
.globl slice_sparse_dense_row
.type slice_sparse_dense_row, @function
slice_sparse_dense_row:
.LFB2052:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 48
call _Z54__device_stub__Z22slice_sparse_dense_rowPdPiS0_S_iiiiiPdPiS0_S_iiiii
addq $40, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size slice_sparse_dense_row, .-slice_sparse_dense_row
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "slice_sparse_dense_row"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq slice_sparse_dense_row(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "slice_sparse_dense_row.hip"
.globl __device_stub__slice_sparse_dense_row # -- Begin function __device_stub__slice_sparse_dense_row
.p2align 4, 0x90
.type __device_stub__slice_sparse_dense_row,@function
__device_stub__slice_sparse_dense_row: # @__device_stub__slice_sparse_dense_row
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
leaq 184(%rsp), %rax
movq %rax, 152(%rsp)
leaq 192(%rsp), %rax
movq %rax, 160(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $slice_sparse_dense_row, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end0:
.size __device_stub__slice_sparse_dense_row, .Lfunc_end0-__device_stub__slice_sparse_dense_row
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $slice_sparse_dense_row, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type slice_sparse_dense_row,@object # @slice_sparse_dense_row
.section .rodata,"a",@progbits
.globl slice_sparse_dense_row
.p2align 3, 0x0
slice_sparse_dense_row:
.quad __device_stub__slice_sparse_dense_row
.size slice_sparse_dense_row, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "slice_sparse_dense_row"
.size .L__unnamed_1, 23
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__slice_sparse_dense_row
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym slice_sparse_dense_row
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <iostream>
#include <cstdlib>
#include <cstdio>
#include <cuda_runtime.h>
#include <sys/time.h>
double get_time() {
struct timeval tv;
gettimeofday(&tv, nullptr);
return tv.tv_sec + 1e-6 * tv.tv_usec;
}
constexpr int m = 256;
constexpr int n = m * m * m;
constexpr int block_size = 4;
using grid_type = float[m / block_size][m / block_size][m / block_size][4]
[block_size][block_size][block_size];
__global__ void fill(grid_type *grid_) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n)
return;
float *data = (float *)grid_;
grid_type &grid = *grid_;
for (int k = 0; k < 4; k++) {
data[i + k * n] = 0;
}
}
int main() {
float *a;
cudaMallocManaged(&a, n * sizeof(float) * 4);
auto bs = block_size * block_size * block_size;
std::cout << "bs = " << bs << std::endl;
for (int i = 0; i < 20; i++) {
cudaDeviceSynchronize();
auto t = get_time();
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
cudaDeviceSynchronize();
fill<<<n / bs, bs>>>((grid_type *)a);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
std::cout << "device " << milliseconds << std::endl;
}
std::cout << std::endl;
} | code for sm_80
Function : _Z4fillPA64_A64_A64_A4_A4_A4_A4_f
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GT.U32.AND P0, PT, R2, 0xffffff, PT ; /* 0x00ffffff0200780c */
/* 0x000fda0003f04070 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R9, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff097435 */
/* 0x000fe200000001ff */
/*0070*/ IADD3 R4, R2.reuse, 0x1000000, RZ ; /* 0x0100000002047810 */
/* 0x040fe20007ffe0ff */
/*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0090*/ IADD3 R6, R2.reuse, 0x2000000, RZ ; /* 0x0200000002067810 */
/* 0x040fe40007ffe0ff */
/*00a0*/ IADD3 R8, R2, 0x3000000, RZ ; /* 0x0300000002087810 */
/* 0x000fca0007ffe0ff */
/*00b0*/ IMAD.WIDE.U32 R2, R2, R9, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fc800078e0009 */
/*00c0*/ IMAD.WIDE.U32 R4, R4, R9.reuse, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x080fe200078e0009 */
/*00d0*/ STG.E [R2.64], RZ ; /* 0x000000ff02007986 */
/* 0x000fe6000c101904 */
/*00e0*/ IMAD.WIDE.U32 R6, R6, R9.reuse, c[0x0][0x160] ; /* 0x0000580006067625 */
/* 0x080fe200078e0009 */
/*00f0*/ STG.E [R4.64], RZ ; /* 0x000000ff04007986 */
/* 0x000fe6000c101904 */
/*0100*/ IMAD.WIDE.U32 R8, R8, R9, c[0x0][0x160] ; /* 0x0000580008087625 */
/* 0x000fe200078e0009 */
/*0110*/ STG.E [R6.64], RZ ; /* 0x000000ff06007986 */
/* 0x000fe8000c101904 */
/*0120*/ STG.E [R8.64], RZ ; /* 0x000000ff08007986 */
/* 0x000fe2000c101904 */
/*0130*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0140*/ BRA 0x140; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <iostream>
#include <cstdlib>
#include <cstdio>
#include <cuda_runtime.h>
#include <sys/time.h>
double get_time() {
struct timeval tv;
gettimeofday(&tv, nullptr);
return tv.tv_sec + 1e-6 * tv.tv_usec;
}
constexpr int m = 256;
constexpr int n = m * m * m;
constexpr int block_size = 4;
using grid_type = float[m / block_size][m / block_size][m / block_size][4]
[block_size][block_size][block_size];
__global__ void fill(grid_type *grid_) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n)
return;
float *data = (float *)grid_;
grid_type &grid = *grid_;
for (int k = 0; k < 4; k++) {
data[i + k * n] = 0;
}
}
int main() {
float *a;
cudaMallocManaged(&a, n * sizeof(float) * 4);
auto bs = block_size * block_size * block_size;
std::cout << "bs = " << bs << std::endl;
for (int i = 0; i < 20; i++) {
cudaDeviceSynchronize();
auto t = get_time();
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
cudaDeviceSynchronize();
fill<<<n / bs, bs>>>((grid_type *)a);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
std::cout << "device " << milliseconds << std::endl;
}
std::cout << std::endl;
} | .file "tmpxft_00122c85_00000000-6_fill.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3673:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3673:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z8get_timev
.type _Z8get_timev, @function
_Z8get_timev:
.LFB3669:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $0, %esi
call gettimeofday@PLT
pxor %xmm0, %xmm0
cvtsi2sdq 8(%rsp), %xmm0
mulsd .LC0(%rip), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq (%rsp), %xmm1
addsd %xmm1, %xmm0
movq 24(%rsp), %rax
subq %fs:40, %rax
jne .L6
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size _Z8get_timev, .-_Z8get_timev
.globl _Z47__device_stub__Z4fillPA64_A64_A64_A4_A4_A4_A4_fPA64_A64_A64_A4_A4_A4_A4_f
.type _Z47__device_stub__Z4fillPA64_A64_A64_A4_A4_A4_A4_fPA64_A64_A64_A4_A4_A4_A4_f, @function
_Z47__device_stub__Z4fillPA64_A64_A64_A4_A4_A4_A4_fPA64_A64_A64_A4_A4_A4_A4_f:
.LFB3695:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z4fillPA64_A64_A64_A4_A4_A4_A4_f(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3695:
.size _Z47__device_stub__Z4fillPA64_A64_A64_A4_A4_A4_A4_fPA64_A64_A64_A4_A4_A4_A4_f, .-_Z47__device_stub__Z4fillPA64_A64_A64_A4_A4_A4_A4_fPA64_A64_A64_A4_A4_A4_A4_f
.globl _Z4fillPA64_A64_A64_A4_A4_A4_A4_f
.type _Z4fillPA64_A64_A64_A4_A4_A4_A4_f, @function
_Z4fillPA64_A64_A64_A4_A4_A4_A4_f:
.LFB3696:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z47__device_stub__Z4fillPA64_A64_A64_A4_A4_A4_A4_fPA64_A64_A64_A4_A4_A4_A4_f
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3696:
.size _Z4fillPA64_A64_A64_A4_A4_A4_A4_f, .-_Z4fillPA64_A64_A64_A4_A4_A4_A4_f
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "bs = "
.LC3:
.string "device "
.text
.globl main
.type main, @function
main:
.LFB3670:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rdi
movl $1, %edx
movl $268435456, %esi
call cudaMallocManaged@PLT
leaq .LC1(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $64, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $20, %r12d
leaq 16(%rsp), %r15
leaq 24(%rsp), %r14
leaq _ZSt4cout(%rip), %r13
jmp .L21
.L27:
movq 8(%rsp), %rdi
call _Z47__device_stub__Z4fillPA64_A64_A64_A4_A4_A4_A4_fPA64_A64_A64_A4_A4_A4_A4_f
jmp .L16
.L28:
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L25
call _ZSt16__throw_bad_castv@PLT
.L25:
call __stack_chk_fail@PLT
.L19:
movq %rbp, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq 0(%rbp), %rax
movl $10, %esi
movq %rbp, %rdi
call *48(%rax)
movl %eax, %esi
.L20:
movsbl %sil, %esi
movq %rbx, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
subl $1, %r12d
je .L26
.L21:
call cudaDeviceSynchronize@PLT
call _Z8get_timev
movq %r15, %rdi
call cudaEventCreate@PLT
movq %r14, %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 16(%rsp), %rdi
call cudaEventRecord@PLT
call cudaDeviceSynchronize@PLT
movl $64, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $262144, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L27
.L16:
movl $0, %esi
movq 24(%rsp), %rdi
call cudaEventRecord@PLT
movq 24(%rsp), %rdi
call cudaEventSynchronize@PLT
movl $0x00000000, 44(%rsp)
leaq 44(%rsp), %rdi
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
call cudaEventElapsedTime@PLT
movl $8, %edx
leaq .LC3(%rip), %rsi
movq %r13, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
pxor %xmm0, %xmm0
cvtss2sd 44(%rsp), %xmm0
movq %r13, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rbx
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %rbp
testq %rbp, %rbp
je .L28
cmpb $0, 56(%rbp)
je .L19
movzbl 67(%rbp), %esi
jmp .L20
.L26:
leaq _ZSt4cout(%rip), %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L29
movl $0, %eax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L29:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3670:
.size main, .-main
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC4:
.string "_Z4fillPA64_A64_A64_A4_A4_A4_A4_f"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3698:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z4fillPA64_A64_A64_A4_A4_A4_A4_f(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3698:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long -1598689907
.long 1051772663
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
#include <cstdlib>
#include <cstdio>
#include <cuda_runtime.h>
#include <sys/time.h>
double get_time() {
struct timeval tv;
gettimeofday(&tv, nullptr);
return tv.tv_sec + 1e-6 * tv.tv_usec;
}
constexpr int m = 256;
constexpr int n = m * m * m;
constexpr int block_size = 4;
using grid_type = float[m / block_size][m / block_size][m / block_size][4]
[block_size][block_size][block_size];
__global__ void fill(grid_type *grid_) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n)
return;
float *data = (float *)grid_;
grid_type &grid = *grid_;
for (int k = 0; k < 4; k++) {
data[i + k * n] = 0;
}
}
int main() {
float *a;
cudaMallocManaged(&a, n * sizeof(float) * 4);
auto bs = block_size * block_size * block_size;
std::cout << "bs = " << bs << std::endl;
for (int i = 0; i < 20; i++) {
cudaDeviceSynchronize();
auto t = get_time();
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
cudaDeviceSynchronize();
fill<<<n / bs, bs>>>((grid_type *)a);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
std::cout << "device " << milliseconds << std::endl;
}
std::cout << std::endl;
} | #include <iostream>
#include <cstdlib>
#include <cstdio>
#include <hip/hip_runtime.h>
#include <sys/time.h>
double get_time() {
struct timeval tv;
gettimeofday(&tv, nullptr);
return tv.tv_sec + 1e-6 * tv.tv_usec;
}
constexpr int m = 256;
constexpr int n = m * m * m;
constexpr int block_size = 4;
using grid_type = float[m / block_size][m / block_size][m / block_size][4]
[block_size][block_size][block_size];
__global__ void fill(grid_type *grid_) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n)
return;
float *data = (float *)grid_;
grid_type &grid = *grid_;
for (int k = 0; k < 4; k++) {
data[i + k * n] = 0;
}
}
int main() {
float *a;
hipMallocManaged(&a, n * sizeof(float) * 4);
auto bs = block_size * block_size * block_size;
std::cout << "bs = " << bs << std::endl;
for (int i = 0; i < 20; i++) {
hipDeviceSynchronize();
auto t = get_time();
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipDeviceSynchronize();
fill<<<n / bs, bs>>>((grid_type *)a);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
std::cout << "device " << milliseconds << std::endl;
}
std::cout << std::endl;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <iostream>
#include <cstdlib>
#include <cstdio>
#include <hip/hip_runtime.h>
#include <sys/time.h>
double get_time() {
struct timeval tv;
gettimeofday(&tv, nullptr);
return tv.tv_sec + 1e-6 * tv.tv_usec;
}
constexpr int m = 256;
constexpr int n = m * m * m;
constexpr int block_size = 4;
using grid_type = float[m / block_size][m / block_size][m / block_size][4]
[block_size][block_size][block_size];
__global__ void fill(grid_type *grid_) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n)
return;
float *data = (float *)grid_;
grid_type &grid = *grid_;
for (int k = 0; k < 4; k++) {
data[i + k * n] = 0;
}
}
int main() {
float *a;
hipMallocManaged(&a, n * sizeof(float) * 4);
auto bs = block_size * block_size * block_size;
std::cout << "bs = " << bs << std::endl;
for (int i = 0; i < 20; i++) {
hipDeviceSynchronize();
auto t = get_time();
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipDeviceSynchronize();
fill<<<n / bs, bs>>>((grid_type *)a);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
std::cout << "device " << milliseconds << std::endl;
}
std::cout << std::endl;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z4fillPA64_A64_A64_A4_A4_A4_A4_f
.globl _Z4fillPA64_A64_A64_A4_A4_A4_A4_f
.p2align 8
.type _Z4fillPA64_A64_A64_A4_A4_A4_A4_f,@function
_Z4fillPA64_A64_A64_A4_A4_A4_A4_f:
s_load_b32 s2, s[0:1], 0x14
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_u32_e32 0x1000000, v1
s_cbranch_execz .LBB0_3
s_load_b64 s[0:1], s[0:1], 0x0
v_mov_b32_e32 v3, 0
s_mov_b32 s2, 0
.LBB0_2:
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_add_nc_u32_e32 v2, s2, v1
s_add_i32 s2, s2, 0x1000000
s_cmp_lg_u32 s2, 0x4000000
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[4:5], 2, v[2:3]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v4, vcc_lo, s0, v4
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v5, vcc_lo
global_store_b32 v[4:5], v3, off
s_cbranch_scc1 .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4fillPA64_A64_A64_A4_A4_A4_A4_f
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 264
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z4fillPA64_A64_A64_A4_A4_A4_A4_f, .Lfunc_end0-_Z4fillPA64_A64_A64_A4_A4_A4_A4_f
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: hidden_block_count_x
- .offset: 12
.size: 4
.value_kind: hidden_block_count_y
- .offset: 16
.size: 4
.value_kind: hidden_block_count_z
- .offset: 20
.size: 2
.value_kind: hidden_group_size_x
- .offset: 22
.size: 2
.value_kind: hidden_group_size_y
- .offset: 24
.size: 2
.value_kind: hidden_group_size_z
- .offset: 26
.size: 2
.value_kind: hidden_remainder_x
- .offset: 28
.size: 2
.value_kind: hidden_remainder_y
- .offset: 30
.size: 2
.value_kind: hidden_remainder_z
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 72
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 264
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4fillPA64_A64_A64_A4_A4_A4_A4_f
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z4fillPA64_A64_A64_A4_A4_A4_A4_f.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <iostream>
#include <cstdlib>
#include <cstdio>
#include <hip/hip_runtime.h>
#include <sys/time.h>
double get_time() {
struct timeval tv;
gettimeofday(&tv, nullptr);
return tv.tv_sec + 1e-6 * tv.tv_usec;
}
constexpr int m = 256;
constexpr int n = m * m * m;
constexpr int block_size = 4;
using grid_type = float[m / block_size][m / block_size][m / block_size][4]
[block_size][block_size][block_size];
__global__ void fill(grid_type *grid_) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n)
return;
float *data = (float *)grid_;
grid_type &grid = *grid_;
for (int k = 0; k < 4; k++) {
data[i + k * n] = 0;
}
}
int main() {
float *a;
hipMallocManaged(&a, n * sizeof(float) * 4);
auto bs = block_size * block_size * block_size;
std::cout << "bs = " << bs << std::endl;
for (int i = 0; i < 20; i++) {
hipDeviceSynchronize();
auto t = get_time();
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipDeviceSynchronize();
fill<<<n / bs, bs>>>((grid_type *)a);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
std::cout << "device " << milliseconds << std::endl;
}
std::cout << std::endl;
} | .text
.file "fill.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z8get_timev
.LCPI0_0:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl _Z8get_timev
.p2align 4, 0x90
.type _Z8get_timev,@function
_Z8get_timev: # @_Z8get_timev
.cfi_startproc
# %bb.0:
subq $24, %rsp
.cfi_def_cfa_offset 32
leaq 8(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
cvtsi2sdq 8(%rsp), %xmm1
cvtsi2sdq 16(%rsp), %xmm0
mulsd .LCPI0_0(%rip), %xmm0
addsd %xmm1, %xmm0
addq $24, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z8get_timev, .Lfunc_end0-_Z8get_timev
.cfi_endproc
# -- End function
.globl _Z19__device_stub__fillPA64_A64_A64_A4_A4_A4_A4_f # -- Begin function _Z19__device_stub__fillPA64_A64_A64_A4_A4_A4_A4_f
.p2align 4, 0x90
.type _Z19__device_stub__fillPA64_A64_A64_A4_A4_A4_A4_f,@function
_Z19__device_stub__fillPA64_A64_A64_A4_A4_A4_A4_f: # @_Z19__device_stub__fillPA64_A64_A64_A4_A4_A4_A4_f
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z4fillPA64_A64_A64_A4_A4_A4_A4_f, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end1:
.size _Z19__device_stub__fillPA64_A64_A64_A4_A4_A4_A4_f, .Lfunc_end1-_Z19__device_stub__fillPA64_A64_A64_A4_A4_A4_A4_f
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $88, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
leaq 40(%rsp), %rdi
movl $268435456, %esi # imm = 0x10000000
movl $1, %edx
callq hipMallocManaged
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $5, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl $64, %esi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB2_17
# %bb.1: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%rbx)
je .LBB2_3
# %bb.2:
movzbl 67(%rbx), %ecx
jmp .LBB2_4
.LBB2_3:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB2_4: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movabsq $4294967360, %rbx # imm = 0x100000040
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $20, %r15d
leaq 16(%rsp), %r14
movq %rbx, %r12
leaq 262080(%rbx), %r13
jmp .LBB2_5
.p2align 4, 0x90
.LBB2_15: # in Loop: Header=BB2_5 Depth=1
movq %rbx, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_16: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit17
# in Loop: Header=BB2_5 Depth=1
movsbl %al, %esi
movq %rbp, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
decl %r15d
je .LBB2_10
.LBB2_5: # =>This Inner Loop Header: Depth=1
callq hipDeviceSynchronize
movq %r14, %rdi
xorl %esi, %esi
callq gettimeofday
leaq 8(%rsp), %rdi
callq hipEventCreate
movq %rsp, %rdi
callq hipEventCreate
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
callq hipDeviceSynchronize
movq %r13, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_7
# %bb.6: # in Loop: Header=BB2_5 Depth=1
movq 40(%rsp), %rax
movq %rax, 80(%rsp)
leaq 80(%rsp), %rax
movq %rax, 32(%rsp)
movq %r14, %rdi
leaq 64(%rsp), %rsi
leaq 56(%rsp), %rdx
leaq 48(%rsp), %rcx
callq __hipPopCallConfiguration
movq 16(%rsp), %rsi
movl 24(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
movl $_Z4fillPA64_A64_A64_A4_A4_A4_A4_f, %edi
leaq 32(%rsp), %r9
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_7: # in Loop: Header=BB2_5 Depth=1
movq (%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq (%rsp), %rdi
callq hipEventSynchronize
movl $0, 16(%rsp)
movq 8(%rsp), %rsi
movq (%rsp), %rdx
movq %r14, %rdi
callq hipEventElapsedTime
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $8, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movss 16(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq %rax, %rbp
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%rbp,%rax), %rbx
testq %rbx, %rbx
je .LBB2_17
# %bb.8: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i14
# in Loop: Header=BB2_5 Depth=1
cmpb $0, 56(%rbx)
je .LBB2_15
# %bb.9: # in Loop: Header=BB2_5 Depth=1
movzbl 67(%rbx), %eax
jmp .LBB2_16
.LBB2_10:
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbx
testq %rbx, %rbx
je .LBB2_17
# %bb.11: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i9
cmpb $0, 56(%rbx)
je .LBB2_13
# %bb.12:
movzbl 67(%rbx), %eax
jmp .LBB2_14
.LBB2_13:
movq %rbx, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_14: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit12
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
xorl %eax, %eax
addq $88, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB2_17:
.cfi_def_cfa_offset 144
callq _ZSt16__throw_bad_castv
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4fillPA64_A64_A64_A4_A4_A4_A4_f, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z4fillPA64_A64_A64_A4_A4_A4_A4_f,@object # @_Z4fillPA64_A64_A64_A4_A4_A4_A4_f
.section .rodata,"a",@progbits
.globl _Z4fillPA64_A64_A64_A4_A4_A4_A4_f
.p2align 3, 0x0
_Z4fillPA64_A64_A64_A4_A4_A4_A4_f:
.quad _Z19__device_stub__fillPA64_A64_A64_A4_A4_A4_A4_f
.size _Z4fillPA64_A64_A64_A4_A4_A4_A4_f, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "bs = "
.size .L.str, 6
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "device "
.size .L.str.1, 9
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z4fillPA64_A64_A64_A4_A4_A4_A4_f"
.size .L__unnamed_1, 34
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z19__device_stub__fillPA64_A64_A64_A4_A4_A4_A4_f
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z4fillPA64_A64_A64_A4_A4_A4_A4_f
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z4fillPA64_A64_A64_A4_A4_A4_A4_f
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GT.U32.AND P0, PT, R2, 0xffffff, PT ; /* 0x00ffffff0200780c */
/* 0x000fda0003f04070 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R9, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff097435 */
/* 0x000fe200000001ff */
/*0070*/ IADD3 R4, R2.reuse, 0x1000000, RZ ; /* 0x0100000002047810 */
/* 0x040fe20007ffe0ff */
/*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0090*/ IADD3 R6, R2.reuse, 0x2000000, RZ ; /* 0x0200000002067810 */
/* 0x040fe40007ffe0ff */
/*00a0*/ IADD3 R8, R2, 0x3000000, RZ ; /* 0x0300000002087810 */
/* 0x000fca0007ffe0ff */
/*00b0*/ IMAD.WIDE.U32 R2, R2, R9, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fc800078e0009 */
/*00c0*/ IMAD.WIDE.U32 R4, R4, R9.reuse, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x080fe200078e0009 */
/*00d0*/ STG.E [R2.64], RZ ; /* 0x000000ff02007986 */
/* 0x000fe6000c101904 */
/*00e0*/ IMAD.WIDE.U32 R6, R6, R9.reuse, c[0x0][0x160] ; /* 0x0000580006067625 */
/* 0x080fe200078e0009 */
/*00f0*/ STG.E [R4.64], RZ ; /* 0x000000ff04007986 */
/* 0x000fe6000c101904 */
/*0100*/ IMAD.WIDE.U32 R8, R8, R9, c[0x0][0x160] ; /* 0x0000580008087625 */
/* 0x000fe200078e0009 */
/*0110*/ STG.E [R6.64], RZ ; /* 0x000000ff06007986 */
/* 0x000fe8000c101904 */
/*0120*/ STG.E [R8.64], RZ ; /* 0x000000ff08007986 */
/* 0x000fe2000c101904 */
/*0130*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0140*/ BRA 0x140; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z4fillPA64_A64_A64_A4_A4_A4_A4_f
.globl _Z4fillPA64_A64_A64_A4_A4_A4_A4_f
.p2align 8
.type _Z4fillPA64_A64_A64_A4_A4_A4_A4_f,@function
_Z4fillPA64_A64_A64_A4_A4_A4_A4_f:
s_load_b32 s2, s[0:1], 0x14
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_u32_e32 0x1000000, v1
s_cbranch_execz .LBB0_3
s_load_b64 s[0:1], s[0:1], 0x0
v_mov_b32_e32 v3, 0
s_mov_b32 s2, 0
.LBB0_2:
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_add_nc_u32_e32 v2, s2, v1
s_add_i32 s2, s2, 0x1000000
s_cmp_lg_u32 s2, 0x4000000
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[4:5], 2, v[2:3]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v4, vcc_lo, s0, v4
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v5, vcc_lo
global_store_b32 v[4:5], v3, off
s_cbranch_scc1 .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4fillPA64_A64_A64_A4_A4_A4_A4_f
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 264
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z4fillPA64_A64_A64_A4_A4_A4_A4_f, .Lfunc_end0-_Z4fillPA64_A64_A64_A4_A4_A4_A4_f
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: hidden_block_count_x
- .offset: 12
.size: 4
.value_kind: hidden_block_count_y
- .offset: 16
.size: 4
.value_kind: hidden_block_count_z
- .offset: 20
.size: 2
.value_kind: hidden_group_size_x
- .offset: 22
.size: 2
.value_kind: hidden_group_size_y
- .offset: 24
.size: 2
.value_kind: hidden_group_size_z
- .offset: 26
.size: 2
.value_kind: hidden_remainder_x
- .offset: 28
.size: 2
.value_kind: hidden_remainder_y
- .offset: 30
.size: 2
.value_kind: hidden_remainder_z
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 72
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 264
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4fillPA64_A64_A64_A4_A4_A4_A4_f
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z4fillPA64_A64_A64_A4_A4_A4_A4_f.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00122c85_00000000-6_fill.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3673:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3673:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z8get_timev
.type _Z8get_timev, @function
_Z8get_timev:
.LFB3669:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $0, %esi
call gettimeofday@PLT
pxor %xmm0, %xmm0
cvtsi2sdq 8(%rsp), %xmm0
mulsd .LC0(%rip), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq (%rsp), %xmm1
addsd %xmm1, %xmm0
movq 24(%rsp), %rax
subq %fs:40, %rax
jne .L6
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size _Z8get_timev, .-_Z8get_timev
.globl _Z47__device_stub__Z4fillPA64_A64_A64_A4_A4_A4_A4_fPA64_A64_A64_A4_A4_A4_A4_f
.type _Z47__device_stub__Z4fillPA64_A64_A64_A4_A4_A4_A4_fPA64_A64_A64_A4_A4_A4_A4_f, @function
_Z47__device_stub__Z4fillPA64_A64_A64_A4_A4_A4_A4_fPA64_A64_A64_A4_A4_A4_A4_f:
.LFB3695:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z4fillPA64_A64_A64_A4_A4_A4_A4_f(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3695:
.size _Z47__device_stub__Z4fillPA64_A64_A64_A4_A4_A4_A4_fPA64_A64_A64_A4_A4_A4_A4_f, .-_Z47__device_stub__Z4fillPA64_A64_A64_A4_A4_A4_A4_fPA64_A64_A64_A4_A4_A4_A4_f
.globl _Z4fillPA64_A64_A64_A4_A4_A4_A4_f
.type _Z4fillPA64_A64_A64_A4_A4_A4_A4_f, @function
_Z4fillPA64_A64_A64_A4_A4_A4_A4_f:
.LFB3696:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z47__device_stub__Z4fillPA64_A64_A64_A4_A4_A4_A4_fPA64_A64_A64_A4_A4_A4_A4_f
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3696:
.size _Z4fillPA64_A64_A64_A4_A4_A4_A4_f, .-_Z4fillPA64_A64_A64_A4_A4_A4_A4_f
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "bs = "
.LC3:
.string "device "
.text
.globl main
.type main, @function
main:
.LFB3670:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rdi
movl $1, %edx
movl $268435456, %esi
call cudaMallocManaged@PLT
leaq .LC1(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $64, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $20, %r12d
leaq 16(%rsp), %r15
leaq 24(%rsp), %r14
leaq _ZSt4cout(%rip), %r13
jmp .L21
.L27:
movq 8(%rsp), %rdi
call _Z47__device_stub__Z4fillPA64_A64_A64_A4_A4_A4_A4_fPA64_A64_A64_A4_A4_A4_A4_f
jmp .L16
.L28:
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L25
call _ZSt16__throw_bad_castv@PLT
.L25:
call __stack_chk_fail@PLT
.L19:
movq %rbp, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq 0(%rbp), %rax
movl $10, %esi
movq %rbp, %rdi
call *48(%rax)
movl %eax, %esi
.L20:
movsbl %sil, %esi
movq %rbx, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
subl $1, %r12d
je .L26
.L21:
call cudaDeviceSynchronize@PLT
call _Z8get_timev
movq %r15, %rdi
call cudaEventCreate@PLT
movq %r14, %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 16(%rsp), %rdi
call cudaEventRecord@PLT
call cudaDeviceSynchronize@PLT
movl $64, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $262144, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L27
.L16:
movl $0, %esi
movq 24(%rsp), %rdi
call cudaEventRecord@PLT
movq 24(%rsp), %rdi
call cudaEventSynchronize@PLT
movl $0x00000000, 44(%rsp)
leaq 44(%rsp), %rdi
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
call cudaEventElapsedTime@PLT
movl $8, %edx
leaq .LC3(%rip), %rsi
movq %r13, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
pxor %xmm0, %xmm0
cvtss2sd 44(%rsp), %xmm0
movq %r13, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rbx
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %rbp
testq %rbp, %rbp
je .L28
cmpb $0, 56(%rbp)
je .L19
movzbl 67(%rbp), %esi
jmp .L20
.L26:
leaq _ZSt4cout(%rip), %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L29
movl $0, %eax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L29:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3670:
.size main, .-main
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC4:
.string "_Z4fillPA64_A64_A64_A4_A4_A4_A4_f"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3698:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z4fillPA64_A64_A64_A4_A4_A4_A4_f(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3698:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long -1598689907
.long 1051772663
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "fill.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z8get_timev
.LCPI0_0:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl _Z8get_timev
.p2align 4, 0x90
.type _Z8get_timev,@function
_Z8get_timev: # @_Z8get_timev
.cfi_startproc
# %bb.0:
subq $24, %rsp
.cfi_def_cfa_offset 32
leaq 8(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
cvtsi2sdq 8(%rsp), %xmm1
cvtsi2sdq 16(%rsp), %xmm0
mulsd .LCPI0_0(%rip), %xmm0
addsd %xmm1, %xmm0
addq $24, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z8get_timev, .Lfunc_end0-_Z8get_timev
.cfi_endproc
# -- End function
.globl _Z19__device_stub__fillPA64_A64_A64_A4_A4_A4_A4_f # -- Begin function _Z19__device_stub__fillPA64_A64_A64_A4_A4_A4_A4_f
.p2align 4, 0x90
.type _Z19__device_stub__fillPA64_A64_A64_A4_A4_A4_A4_f,@function
_Z19__device_stub__fillPA64_A64_A64_A4_A4_A4_A4_f: # @_Z19__device_stub__fillPA64_A64_A64_A4_A4_A4_A4_f
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z4fillPA64_A64_A64_A4_A4_A4_A4_f, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end1:
.size _Z19__device_stub__fillPA64_A64_A64_A4_A4_A4_A4_f, .Lfunc_end1-_Z19__device_stub__fillPA64_A64_A64_A4_A4_A4_A4_f
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $88, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
leaq 40(%rsp), %rdi
movl $268435456, %esi # imm = 0x10000000
movl $1, %edx
callq hipMallocManaged
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $5, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl $64, %esi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB2_17
# %bb.1: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%rbx)
je .LBB2_3
# %bb.2:
movzbl 67(%rbx), %ecx
jmp .LBB2_4
.LBB2_3:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB2_4: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movabsq $4294967360, %rbx # imm = 0x100000040
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $20, %r15d
leaq 16(%rsp), %r14
movq %rbx, %r12
leaq 262080(%rbx), %r13
jmp .LBB2_5
.p2align 4, 0x90
.LBB2_15: # in Loop: Header=BB2_5 Depth=1
movq %rbx, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_16: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit17
# in Loop: Header=BB2_5 Depth=1
movsbl %al, %esi
movq %rbp, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
decl %r15d
je .LBB2_10
.LBB2_5: # =>This Inner Loop Header: Depth=1
callq hipDeviceSynchronize
movq %r14, %rdi
xorl %esi, %esi
callq gettimeofday
leaq 8(%rsp), %rdi
callq hipEventCreate
movq %rsp, %rdi
callq hipEventCreate
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
callq hipDeviceSynchronize
movq %r13, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_7
# %bb.6: # in Loop: Header=BB2_5 Depth=1
movq 40(%rsp), %rax
movq %rax, 80(%rsp)
leaq 80(%rsp), %rax
movq %rax, 32(%rsp)
movq %r14, %rdi
leaq 64(%rsp), %rsi
leaq 56(%rsp), %rdx
leaq 48(%rsp), %rcx
callq __hipPopCallConfiguration
movq 16(%rsp), %rsi
movl 24(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
movl $_Z4fillPA64_A64_A64_A4_A4_A4_A4_f, %edi
leaq 32(%rsp), %r9
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_7: # in Loop: Header=BB2_5 Depth=1
movq (%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq (%rsp), %rdi
callq hipEventSynchronize
movl $0, 16(%rsp)
movq 8(%rsp), %rsi
movq (%rsp), %rdx
movq %r14, %rdi
callq hipEventElapsedTime
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $8, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movss 16(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq %rax, %rbp
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%rbp,%rax), %rbx
testq %rbx, %rbx
je .LBB2_17
# %bb.8: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i14
# in Loop: Header=BB2_5 Depth=1
cmpb $0, 56(%rbx)
je .LBB2_15
# %bb.9: # in Loop: Header=BB2_5 Depth=1
movzbl 67(%rbx), %eax
jmp .LBB2_16
.LBB2_10:
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbx
testq %rbx, %rbx
je .LBB2_17
# %bb.11: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i9
cmpb $0, 56(%rbx)
je .LBB2_13
# %bb.12:
movzbl 67(%rbx), %eax
jmp .LBB2_14
.LBB2_13:
movq %rbx, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_14: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit12
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
xorl %eax, %eax
addq $88, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB2_17:
.cfi_def_cfa_offset 144
callq _ZSt16__throw_bad_castv
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4fillPA64_A64_A64_A4_A4_A4_A4_f, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z4fillPA64_A64_A64_A4_A4_A4_A4_f,@object # @_Z4fillPA64_A64_A64_A4_A4_A4_A4_f
.section .rodata,"a",@progbits
.globl _Z4fillPA64_A64_A64_A4_A4_A4_A4_f
.p2align 3, 0x0
_Z4fillPA64_A64_A64_A4_A4_A4_A4_f:
.quad _Z19__device_stub__fillPA64_A64_A64_A4_A4_A4_A4_f
.size _Z4fillPA64_A64_A64_A4_A4_A4_A4_f, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "bs = "
.size .L.str, 6
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "device "
.size .L.str.1, 9
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z4fillPA64_A64_A64_A4_A4_A4_A4_f"
.size .L__unnamed_1, 34
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z19__device_stub__fillPA64_A64_A64_A4_A4_A4_A4_f
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z4fillPA64_A64_A64_A4_A4_A4_A4_f
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void MatrixMulDevice( float *A, float *B, float *C, int *matrixSize)
{
int chunk = (*matrixSize) / gridDim.x;
int sum, i, k;
for(i = blockIdx.x * chunk; i < blockIdx.x * chunk + chunk - 1; i++) {
sum = 0;
for(k = 0; k < *matrixSize; k++) {
sum += A[i * *matrixSize + k] * B [k * *matrixSize + threadIdx.x];
}
C[i * *matrixSize + threadIdx.x] = sum;
}
} | code for sm_80
Function : _Z15MatrixMulDevicePfS_S_Pi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff027624 */
/* 0x000fe200078e00ff */
/*0020*/ MOV R3, c[0x0][0x17c] ; /* 0x00005f0000037a02 */
/* 0x000fe20000000f00 */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fc80000000a00 */
/*0040*/ LDG.E R0, [R2.64] ; /* 0x0000000602007981 */
/* 0x000ea2000c1e1900 */
/*0050*/ I2F.U32.RP R6, c[0x0][0xc] ; /* 0x0000030000067b06 */
/* 0x000e220000209000 */
/*0060*/ ISETP.NE.U32.AND P2, PT, RZ, c[0x0][0xc], PT ; /* 0x00000300ff007a0c */
/* 0x000fce0003f45070 */
/*0070*/ MUFU.RCP R6, R6 ; /* 0x0000000600067308 */
/* 0x001e240000001000 */
/*0080*/ IADD3 R4, R6, 0xffffffe, RZ ; /* 0x0ffffffe06047810 */
/* 0x001fcc0007ffe0ff */
/*0090*/ F2I.FTZ.U32.TRUNC.NTZ R5, R4 ; /* 0x0000000400057305 */
/* 0x000064000021f000 */
/*00a0*/ HFMA2.MMA R4, -RZ, RZ, 0, 0 ; /* 0x00000000ff047435 */
/* 0x001fe200000001ff */
/*00b0*/ IMAD.MOV R7, RZ, RZ, -R5 ; /* 0x000000ffff077224 */
/* 0x002fc800078e0a05 */
/*00c0*/ IMAD R7, R7, c[0x0][0xc], RZ ; /* 0x0000030007077a24 */
/* 0x000fca00078e02ff */
/*00d0*/ IMAD.HI.U32 R5, R5, R7, R4 ; /* 0x0000000705057227 */
/* 0x000fe400078e0004 */
/*00e0*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e280000002500 */
/*00f0*/ IMAD.HI.U32 R5, R5, R0, RZ ; /* 0x0000000005057227 */
/* 0x004fc800078e00ff */
/*0100*/ IMAD.MOV R7, RZ, RZ, -R5 ; /* 0x000000ffff077224 */
/* 0x000fc800078e0a05 */
/*0110*/ IMAD R0, R7, c[0x0][0xc], R0 ; /* 0x0000030007007a24 */
/* 0x000fca00078e0200 */
/*0120*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0xc], PT ; /* 0x0000030000007a0c */
/* 0x000fda0003f06070 */
/*0130*/ @P0 IADD3 R0, R0, -c[0x0][0xc], RZ ; /* 0x8000030000000a10 */
/* 0x000fe40007ffe0ff */
/*0140*/ @P0 IADD3 R5, R5, 0x1, RZ ; /* 0x0000000105050810 */
/* 0x000fe40007ffe0ff */
/*0150*/ ISETP.GE.U32.AND P1, PT, R0, c[0x0][0xc], PT ; /* 0x0000030000007a0c */
/* 0x000fda0003f26070 */
/*0160*/ @P1 IADD3 R5, R5, 0x1, RZ ; /* 0x0000000105051810 */
/* 0x000fe40007ffe0ff */
/*0170*/ @!P2 LOP3.LUT R5, RZ, c[0x0][0xc], RZ, 0x33, !PT ; /* 0x00000300ff05aa12 */
/* 0x000fca00078e33ff */
/*0180*/ IMAD R15, R5, R4, RZ ; /* 0x00000004050f7224 */
/* 0x001fca00078e02ff */
/*0190*/ IADD3 R0, R5, -0x1, R15 ; /* 0xffffffff05007810 */
/* 0x000fc80007ffe00f */
/*01a0*/ ISETP.GE.U32.AND P0, PT, R15, R0, PT ; /* 0x000000000f00720c */
/* 0x000fda0003f06070 */
/*01b0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*01c0*/ S2R R14, SR_TID.X ; /* 0x00000000000e7919 */
/* 0x000e240000002100 */
/*01d0*/ LDG.E R17, [R2.64] ; /* 0x0000000602117981 */
/* 0x000ea4000c1e1900 */
/*01e0*/ ISETP.GT.AND P0, PT, R17, RZ, PT ; /* 0x000000ff1100720c */
/* 0x004fda0003f04270 */
/*01f0*/ @!P0 MOV R21, RZ ; /* 0x000000ff00158202 */
/* 0x001fe20000000f00 */
/*0200*/ @!P0 BRA 0x6f0 ; /* 0x000004e000008947 */
/* 0x000fea0003800000 */
/*0210*/ IADD3 R4, R17.reuse, -0x1, RZ ; /* 0xffffffff11047810 */
/* 0x040fe20007ffe0ff */
/*0220*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fe20008000000 */
/*0230*/ LOP3.LUT R16, R17.reuse, 0x3, RZ, 0xc0, !PT ; /* 0x0000000311107812 */
/* 0x040fe200078ec0ff */
/*0240*/ IMAD R18, R17, R15, RZ ; /* 0x0000000f11127224 */
/* 0x000fe200078e02ff */
/*0250*/ ISETP.GE.U32.AND P1, PT, R4, 0x3, PT ; /* 0x000000030400780c */
/* 0x000fe20003f26070 */
/*0260*/ IMAD.MOV.U32 R21, RZ, RZ, RZ ; /* 0x000000ffff157224 */
/* 0x000fe200078e00ff */
/*0270*/ ISETP.NE.AND P0, PT, R16, RZ, PT ; /* 0x000000ff1000720c */
/* 0x000fd60003f05270 */
/*0280*/ @!P1 BRA 0x530 ; /* 0x000002a000009947 */
/* 0x000fea0003800000 */
/*0290*/ HFMA2.MMA R19, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff137435 */
/* 0x000fe200000001ff */
/*02a0*/ IMAD.IADD R22, R17.reuse, 0x1, R14.reuse ; /* 0x0000000111167824 */
/* 0x141fe200078e020e */
/*02b0*/ LEA R20, R17.reuse, R14.reuse, 0x1 ; /* 0x0000000e11147211 */
/* 0x0c0fe200078e08ff */
/*02c0*/ IMAD R24, R17, 0x3, R14 ; /* 0x0000000311187824 */
/* 0x000fe200078e020e */
/*02d0*/ MOV R26, R14 ; /* 0x0000000e001a7202 */
/* 0x000fe20000000f00 */
/*02e0*/ IMAD.IADD R27, R16, 0x1, -R17 ; /* 0x00000001101b7824 */
/* 0x000fe200078e0a11 */
/*02f0*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fc80008000000 */
/*0300*/ IMAD.WIDE R4, R18, R19, c[0x0][0x160] ; /* 0x0000580012047625 */
/* 0x000fc800078e0213 */
/*0310*/ IMAD.WIDE.U32 R6, R26, R19.reuse, c[0x0][0x168] ; /* 0x00005a001a067625 */
/* 0x080fe200078e0013 */
/*0320*/ LDG.E R23, [R4.64] ; /* 0x0000000604177981 */
/* 0x0010a6000c1e1900 */
/*0330*/ IMAD.WIDE.U32 R8, R22, R19.reuse, c[0x0][0x168] ; /* 0x00005a0016087625 */
/* 0x080fe200078e0013 */
/*0340*/ LDG.E R25, [R4.64+0x4] ; /* 0x0000040604197981 */
/* 0x0000e8000c1e1900 */
/*0350*/ LDG.E R6, [R6.64] ; /* 0x0000000606067981 */
/* 0x000ea8000c1e1900 */
/*0360*/ LDG.E R8, [R8.64] ; /* 0x0000000608087981 */
/* 0x000ee2000c1e1900 */
/*0370*/ IMAD.WIDE.U32 R10, R20, R19, c[0x0][0x168] ; /* 0x00005a00140a7625 */
/* 0x000fc600078e0013 */
/*0380*/ LDG.E R29, [R4.64+0x8] ; /* 0x00000806041d7981 */
/* 0x000128000c1e1900 */
/*0390*/ LDG.E R10, [R10.64] ; /* 0x000000060a0a7981 */
/* 0x000f22000c1e1900 */
/*03a0*/ IMAD.WIDE.U32 R12, R24, R19, c[0x0][0x168] ; /* 0x00005a00180c7625 */
/* 0x000fc600078e0013 */
/*03b0*/ LDG.E R28, [R4.64+0xc] ; /* 0x00000c06041c7981 */
/* 0x000168000c1e1900 */
/*03c0*/ LDG.E R12, [R12.64] ; /* 0x000000060c0c7981 */
/* 0x000f62000c1e1900 */
/*03d0*/ I2F R21, R21 ; /* 0x0000001500157306 */
/* 0x002ea20000201400 */
/*03e0*/ UIADD3 UR4, UR4, 0x4, URZ ; /* 0x0000000404047890 */
/* 0x000fe2000fffe03f */
/*03f0*/ IADD3 R4, P2, R4, 0x10, RZ ; /* 0x0000001004047810 */
/* 0x001fe20007f5e0ff */
/*0400*/ IMAD R24, R17.reuse, 0x4, R24 ; /* 0x0000000411187824 */
/* 0x040fe200078e0218 */
/*0410*/ LEA R22, R17.reuse, R22, 0x2 ; /* 0x0000001611167211 */
/* 0x040fe400078e10ff */
/*0420*/ LEA R20, R17.reuse, R20, 0x2 ; /* 0x0000001411147211 */
/* 0x040fe200078e10ff */
/*0430*/ IMAD.X R5, RZ, RZ, R5, P2 ; /* 0x000000ffff057224 */
/* 0x000fe200010e0605 */
/*0440*/ LEA R26, R17, R26, 0x2 ; /* 0x0000001a111a7211 */
/* 0x000fe200078e10ff */
/*0450*/ FFMA R6, R6, R23, R21 ; /* 0x0000001706067223 */
/* 0x004fcc0000000015 */
/*0460*/ F2I.TRUNC.NTZ R6, R6 ; /* 0x0000000600067305 */
/* 0x000e30000020f100 */
/*0470*/ I2F R7, R6 ; /* 0x0000000600077306 */
/* 0x001ee40000201400 */
/*0480*/ FFMA R7, R8, R25, R7 ; /* 0x0000001908077223 */
/* 0x008fcc0000000007 */
/*0490*/ F2I.TRUNC.NTZ R7, R7 ; /* 0x0000000700077305 */
/* 0x000e30000020f100 */
/*04a0*/ I2F R8, R7 ; /* 0x0000000700087306 */
/* 0x001f240000201400 */
/*04b0*/ FFMA R8, R10, R29, R8 ; /* 0x0000001d0a087223 */
/* 0x010fcc0000000008 */
/*04c0*/ F2I.TRUNC.NTZ R8, R8 ; /* 0x0000000800087305 */
/* 0x000e22000020f100 */
/*04d0*/ IADD3 R10, R27, UR4, RZ ; /* 0x000000041b0a7c10 */
/* 0x000fce000fffe0ff */
/*04e0*/ I2F R9, R8 ; /* 0x0000000800097306 */
/* 0x001f620000201400 */
/*04f0*/ ISETP.NE.AND P1, PT, R10, RZ, PT ; /* 0x000000ff0a00720c */
/* 0x000fe20003f25270 */
/*0500*/ FFMA R9, R12, R28, R9 ; /* 0x0000001c0c097223 */
/* 0x020fcc0000000009 */
/*0510*/ F2I.TRUNC.NTZ R21, R9 ; /* 0x0000000900157305 */
/* 0x00006c000020f100 */
/*0520*/ @P1 BRA 0x310 ; /* 0xfffffde000001947 */
/* 0x000fea000383ffff */
/*0530*/ @!P0 BRA 0x6f0 ; /* 0x000001b000008947 */
/* 0x000fea0003800000 */
/*0540*/ HFMA2.MMA R13, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0d7435 */
/* 0x000fe200000001ff */
/*0550*/ IADD3 R4, R18, UR4, RZ ; /* 0x0000000412047c10 */
/* 0x000fe2000fffe0ff */
/*0560*/ IMAD R8, R17, UR4, R14 ; /* 0x0000000411087c24 */
/* 0x001fd0000f8e020e */
/*0570*/ IMAD.WIDE.U32 R6, R8, R13, c[0x0][0x168] ; /* 0x00005a0008067625 */
/* 0x000fc800078e000d */
/*0580*/ IMAD.WIDE R4, R4, R13, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fe400078e020d */
/*0590*/ LDG.E R6, [R6.64] ; /* 0x0000000606067981 */
/* 0x000ea8000c1e1900 */
/*05a0*/ LDG.E R9, [R4.64] ; /* 0x0000000604097981 */
/* 0x000ea2000c1e1900 */
/*05b0*/ I2F R21, R21 ; /* 0x0000001500157306 */
/* 0x002ea20000201400 */
/*05c0*/ ISETP.NE.AND P0, PT, R16, 0x1, PT ; /* 0x000000011000780c */
/* 0x000fe20003f05270 */
/*05d0*/ FFMA R9, R6, R9, R21 ; /* 0x0000000906097223 */
/* 0x004fcc0000000015 */
/*05e0*/ F2I.TRUNC.NTZ R21, R9 ; /* 0x0000000900157305 */
/* 0x00006c000020f100 */
/*05f0*/ @!P0 BRA 0x6f0 ; /* 0x000000f000008947 */
/* 0x000fea0003800000 */
/*0600*/ IMAD.IADD R8, R17, 0x1, R8 ; /* 0x0000000111087824 */
/* 0x000fe200078e0208 */
/*0610*/ LDG.E R11, [R4.64+0x4] ; /* 0x00000406040b7981 */
/* 0x000ea6000c1e1900 */
/*0620*/ IMAD.WIDE.U32 R6, R8, R13, c[0x0][0x168] ; /* 0x00005a0008067625 */
/* 0x000fcc00078e000d */
/*0630*/ LDG.E R6, [R6.64] ; /* 0x0000000606067981 */
/* 0x000ea2000c1e1900 */
/*0640*/ ISETP.NE.AND P0, PT, R16, 0x2, PT ; /* 0x000000021000780c */
/* 0x000fda0003f05270 */
/*0650*/ @P0 IADD3 R8, R17, R8, RZ ; /* 0x0000000811080210 */
/* 0x000fca0007ffe0ff */
/*0660*/ @P0 IMAD.WIDE.U32 R8, R8, R13, c[0x0][0x168] ; /* 0x00005a0008080625 */
/* 0x001fe400078e000d */
/*0670*/ @P0 LDG.E R13, [R4.64+0x8] ; /* 0x00000806040d0981 */
/* 0x000ee8000c1e1900 */
/*0680*/ @P0 LDG.E R8, [R8.64] ; /* 0x0000000608080981 */
/* 0x000ee2000c1e1900 */
/*0690*/ I2F R21, R21 ; /* 0x0000001500157306 */
/* 0x002ea40000201400 */
/*06a0*/ FFMA R11, R6, R11, R21 ; /* 0x0000000b060b7223 */
/* 0x004fcc0000000015 */
/*06b0*/ F2I.TRUNC.NTZ R21, R11 ; /* 0x0000000b00157305 */
/* 0x000e30000020f100 */
/*06c0*/ @P0 I2F R10, R21 ; /* 0x00000015000a0306 */
/* 0x001ee40000201400 */
/*06d0*/ @P0 FFMA R13, R8, R13, R10 ; /* 0x0000000d080d0223 */
/* 0x008fcc000000000a */
/*06e0*/ @P0 F2I.TRUNC.NTZ R21, R13 ; /* 0x0000000d00150305 */
/* 0x000064000020f100 */
/*06f0*/ IMAD R4, R17, R15, R14 ; /* 0x0000000f11047224 */
/* 0x001fe200078e020e */
/*0700*/ I2F R21, R21 ; /* 0x0000001500157306 */
/* 0x002e220000201400 */
/*0710*/ IADD3 R15, R15, 0x1, RZ ; /* 0x000000010f0f7810 */
/* 0x000fe40007ffe0ff */
/*0720*/ MOV R5, 0x4 ; /* 0x0000000400057802 */
/* 0x000fe40000000f00 */
/*0730*/ ISETP.GE.U32.AND P0, PT, R15, R0, PT ; /* 0x000000000f00720c */
/* 0x000fc60003f06070 */
/*0740*/ IMAD.WIDE.U32 R4, R4, R5, c[0x0][0x170] ; /* 0x00005c0004047625 */
/* 0x000fca00078e0005 */
/*0750*/ STG.E [R4.64], R21 ; /* 0x0000001504007986 */
/* 0x0011ea000c101906 */
/*0760*/ @!P0 BRA 0x1d0 ; /* 0xfffffa6000008947 */
/* 0x000fea000383ffff */
/*0770*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0780*/ BRA 0x780; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0790*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*07a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*07b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*07c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*07d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*07e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*07f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0800*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0810*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0820*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0830*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0840*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0850*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0860*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0870*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void MatrixMulDevice( float *A, float *B, float *C, int *matrixSize)
{
int chunk = (*matrixSize) / gridDim.x;
int sum, i, k;
for(i = blockIdx.x * chunk; i < blockIdx.x * chunk + chunk - 1; i++) {
sum = 0;
for(k = 0; k < *matrixSize; k++) {
sum += A[i * *matrixSize + k] * B [k * *matrixSize + threadIdx.x];
}
C[i * *matrixSize + threadIdx.x] = sum;
}
} | .file "tmpxft_00003130_00000000-6_MatrixMulDevice.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z41__device_stub__Z15MatrixMulDevicePfS_S_PiPfS_S_Pi
.type _Z41__device_stub__Z15MatrixMulDevicePfS_S_PiPfS_S_Pi, @function
_Z41__device_stub__Z15MatrixMulDevicePfS_S_PiPfS_S_Pi:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %rcx, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z15MatrixMulDevicePfS_S_Pi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z41__device_stub__Z15MatrixMulDevicePfS_S_PiPfS_S_Pi, .-_Z41__device_stub__Z15MatrixMulDevicePfS_S_PiPfS_S_Pi
.globl _Z15MatrixMulDevicePfS_S_Pi
.type _Z15MatrixMulDevicePfS_S_Pi, @function
_Z15MatrixMulDevicePfS_S_Pi:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z41__device_stub__Z15MatrixMulDevicePfS_S_PiPfS_S_Pi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z15MatrixMulDevicePfS_S_Pi, .-_Z15MatrixMulDevicePfS_S_Pi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z15MatrixMulDevicePfS_S_Pi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z15MatrixMulDevicePfS_S_Pi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void MatrixMulDevice( float *A, float *B, float *C, int *matrixSize)
{
int chunk = (*matrixSize) / gridDim.x;
int sum, i, k;
for(i = blockIdx.x * chunk; i < blockIdx.x * chunk + chunk - 1; i++) {
sum = 0;
for(k = 0; k < *matrixSize; k++) {
sum += A[i * *matrixSize + k] * B [k * *matrixSize + threadIdx.x];
}
C[i * *matrixSize + threadIdx.x] = sum;
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void MatrixMulDevice( float *A, float *B, float *C, int *matrixSize)
{
int chunk = (*matrixSize) / gridDim.x;
int sum, i, k;
for(i = blockIdx.x * chunk; i < blockIdx.x * chunk + chunk - 1; i++) {
sum = 0;
for(k = 0; k < *matrixSize; k++) {
sum += A[i * *matrixSize + k] * B [k * *matrixSize + threadIdx.x];
}
C[i * *matrixSize + threadIdx.x] = sum;
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void MatrixMulDevice( float *A, float *B, float *C, int *matrixSize)
{
int chunk = (*matrixSize) / gridDim.x;
int sum, i, k;
for(i = blockIdx.x * chunk; i < blockIdx.x * chunk + chunk - 1; i++) {
sum = 0;
for(k = 0; k < *matrixSize; k++) {
sum += A[i * *matrixSize + k] * B [k * *matrixSize + threadIdx.x];
}
C[i * *matrixSize + threadIdx.x] = sum;
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z15MatrixMulDevicePfS_S_Pi
.globl _Z15MatrixMulDevicePfS_S_Pi
.p2align 8
.type _Z15MatrixMulDevicePfS_S_Pi,@function
_Z15MatrixMulDevicePfS_S_Pi:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x20
s_load_b64 s[2:3], s[0:1], 0x18
s_waitcnt lgkmcnt(0)
v_cvt_f32_u32_e32 v1, s4
s_load_b32 s12, s[2:3], 0x0
s_sub_i32 s3, 0, s4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v1, v1
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v1, 0x4f7ffffe, v1
v_cvt_u32_f32_e32 v1, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_readfirstlane_b32 s2, v1
s_mul_i32 s3, s3, s2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_hi_u32 s3, s2, s3
s_add_i32 s2, s2, s3
s_waitcnt lgkmcnt(0)
s_mul_hi_u32 s2, s12, s2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
s_mul_i32 s3, s2, s4
s_add_i32 s5, s2, 1
s_sub_i32 s3, s12, s3
s_sub_i32 s6, s3, s4
s_cmp_ge_u32 s3, s4
s_cselect_b32 s2, s5, s2
s_cselect_b32 s3, s6, s3
s_add_i32 s5, s2, 1
s_cmp_ge_u32 s3, s4
s_cselect_b32 s8, s5, s2
s_add_i32 s2, s15, 1
s_mul_i32 s13, s8, s15
s_mul_i32 s14, s8, s2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s14, s14, -1
s_cmp_ge_u32 s13, s14
s_cbranch_scc1 .LBB0_6
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x10
s_cmp_gt_i32 s12, 0
v_mov_b32_e32 v2, 0
s_cselect_b32 s0, -1, 0
s_mul_i32 s1, s8, s15
v_cndmask_b32_e64 v1, 0, 1, s0
s_mul_i32 s8, s1, s12
s_delay_alu instid0(VALU_DEP_1)
v_cmp_ne_u32_e64 s0, 1, v1
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_3
.p2align 6
.LBB0_2:
v_mad_u64_u32 v[4:5], null, s13, s12, v[0:1]
v_mov_b32_e32 v5, v2
v_cvt_f32_i32_e32 v1, v3
s_add_i32 s13, s13, 1
s_add_i32 s8, s8, s12
s_cmp_lt_u32 s13, s14
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, s2, v4
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v5, vcc_lo
global_store_b32 v[3:4], v1, off
s_cbranch_scc0 .LBB0_6
.LBB0_3:
v_mov_b32_e32 v3, v2
s_delay_alu instid0(VALU_DEP_2)
s_and_b32 vcc_lo, exec_lo, s0
s_cbranch_vccnz .LBB0_2
s_ashr_i32 s9, s8, 31
v_mov_b32_e32 v3, 0
s_lshl_b64 s[10:11], s[8:9], 2
v_mov_b32_e32 v1, v0
s_waitcnt lgkmcnt(0)
s_add_u32 s10, s4, s10
s_addc_u32 s11, s5, s11
s_mov_b32 s1, s12
.p2align 6
.LBB0_5:
v_lshlrev_b64 v[4:5], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_cvt_f32_i32_e32 v3, v3
s_add_i32 s1, s1, -1
v_add_nc_u32_e32 v1, s12, v1
v_add_co_u32 v4, vcc_lo, s6, v4
s_delay_alu instid0(VALU_DEP_4)
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v5, vcc_lo
global_load_b32 v6, v2, s[10:11]
global_load_b32 v4, v[4:5], off
s_add_u32 s10, s10, 4
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s1, 0
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v3, v6, v4
s_delay_alu instid0(VALU_DEP_1)
v_cvt_i32_f32_e32 v3, v3
s_cbranch_scc1 .LBB0_5
s_branch .LBB0_2
.LBB0_6:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z15MatrixMulDevicePfS_S_Pi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z15MatrixMulDevicePfS_S_Pi, .Lfunc_end0-_Z15MatrixMulDevicePfS_S_Pi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z15MatrixMulDevicePfS_S_Pi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z15MatrixMulDevicePfS_S_Pi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void MatrixMulDevice( float *A, float *B, float *C, int *matrixSize)
{
int chunk = (*matrixSize) / gridDim.x;
int sum, i, k;
for(i = blockIdx.x * chunk; i < blockIdx.x * chunk + chunk - 1; i++) {
sum = 0;
for(k = 0; k < *matrixSize; k++) {
sum += A[i * *matrixSize + k] * B [k * *matrixSize + threadIdx.x];
}
C[i * *matrixSize + threadIdx.x] = sum;
}
} | .text
.file "MatrixMulDevice.hip"
.globl _Z30__device_stub__MatrixMulDevicePfS_S_Pi # -- Begin function _Z30__device_stub__MatrixMulDevicePfS_S_Pi
.p2align 4, 0x90
.type _Z30__device_stub__MatrixMulDevicePfS_S_Pi,@function
_Z30__device_stub__MatrixMulDevicePfS_S_Pi: # @_Z30__device_stub__MatrixMulDevicePfS_S_Pi
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movq %rcx, 48(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rax
movq %rax, 104(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z15MatrixMulDevicePfS_S_Pi, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z30__device_stub__MatrixMulDevicePfS_S_Pi, .Lfunc_end0-_Z30__device_stub__MatrixMulDevicePfS_S_Pi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z15MatrixMulDevicePfS_S_Pi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z15MatrixMulDevicePfS_S_Pi,@object # @_Z15MatrixMulDevicePfS_S_Pi
.section .rodata,"a",@progbits
.globl _Z15MatrixMulDevicePfS_S_Pi
.p2align 3, 0x0
_Z15MatrixMulDevicePfS_S_Pi:
.quad _Z30__device_stub__MatrixMulDevicePfS_S_Pi
.size _Z15MatrixMulDevicePfS_S_Pi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z15MatrixMulDevicePfS_S_Pi"
.size .L__unnamed_1, 28
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z30__device_stub__MatrixMulDevicePfS_S_Pi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z15MatrixMulDevicePfS_S_Pi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z15MatrixMulDevicePfS_S_Pi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff027624 */
/* 0x000fe200078e00ff */
/*0020*/ MOV R3, c[0x0][0x17c] ; /* 0x00005f0000037a02 */
/* 0x000fe20000000f00 */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fc80000000a00 */
/*0040*/ LDG.E R0, [R2.64] ; /* 0x0000000602007981 */
/* 0x000ea2000c1e1900 */
/*0050*/ I2F.U32.RP R6, c[0x0][0xc] ; /* 0x0000030000067b06 */
/* 0x000e220000209000 */
/*0060*/ ISETP.NE.U32.AND P2, PT, RZ, c[0x0][0xc], PT ; /* 0x00000300ff007a0c */
/* 0x000fce0003f45070 */
/*0070*/ MUFU.RCP R6, R6 ; /* 0x0000000600067308 */
/* 0x001e240000001000 */
/*0080*/ IADD3 R4, R6, 0xffffffe, RZ ; /* 0x0ffffffe06047810 */
/* 0x001fcc0007ffe0ff */
/*0090*/ F2I.FTZ.U32.TRUNC.NTZ R5, R4 ; /* 0x0000000400057305 */
/* 0x000064000021f000 */
/*00a0*/ HFMA2.MMA R4, -RZ, RZ, 0, 0 ; /* 0x00000000ff047435 */
/* 0x001fe200000001ff */
/*00b0*/ IMAD.MOV R7, RZ, RZ, -R5 ; /* 0x000000ffff077224 */
/* 0x002fc800078e0a05 */
/*00c0*/ IMAD R7, R7, c[0x0][0xc], RZ ; /* 0x0000030007077a24 */
/* 0x000fca00078e02ff */
/*00d0*/ IMAD.HI.U32 R5, R5, R7, R4 ; /* 0x0000000705057227 */
/* 0x000fe400078e0004 */
/*00e0*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e280000002500 */
/*00f0*/ IMAD.HI.U32 R5, R5, R0, RZ ; /* 0x0000000005057227 */
/* 0x004fc800078e00ff */
/*0100*/ IMAD.MOV R7, RZ, RZ, -R5 ; /* 0x000000ffff077224 */
/* 0x000fc800078e0a05 */
/*0110*/ IMAD R0, R7, c[0x0][0xc], R0 ; /* 0x0000030007007a24 */
/* 0x000fca00078e0200 */
/*0120*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0xc], PT ; /* 0x0000030000007a0c */
/* 0x000fda0003f06070 */
/*0130*/ @P0 IADD3 R0, R0, -c[0x0][0xc], RZ ; /* 0x8000030000000a10 */
/* 0x000fe40007ffe0ff */
/*0140*/ @P0 IADD3 R5, R5, 0x1, RZ ; /* 0x0000000105050810 */
/* 0x000fe40007ffe0ff */
/*0150*/ ISETP.GE.U32.AND P1, PT, R0, c[0x0][0xc], PT ; /* 0x0000030000007a0c */
/* 0x000fda0003f26070 */
/*0160*/ @P1 IADD3 R5, R5, 0x1, RZ ; /* 0x0000000105051810 */
/* 0x000fe40007ffe0ff */
/*0170*/ @!P2 LOP3.LUT R5, RZ, c[0x0][0xc], RZ, 0x33, !PT ; /* 0x00000300ff05aa12 */
/* 0x000fca00078e33ff */
/*0180*/ IMAD R15, R5, R4, RZ ; /* 0x00000004050f7224 */
/* 0x001fca00078e02ff */
/*0190*/ IADD3 R0, R5, -0x1, R15 ; /* 0xffffffff05007810 */
/* 0x000fc80007ffe00f */
/*01a0*/ ISETP.GE.U32.AND P0, PT, R15, R0, PT ; /* 0x000000000f00720c */
/* 0x000fda0003f06070 */
/*01b0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*01c0*/ S2R R14, SR_TID.X ; /* 0x00000000000e7919 */
/* 0x000e240000002100 */
/*01d0*/ LDG.E R17, [R2.64] ; /* 0x0000000602117981 */
/* 0x000ea4000c1e1900 */
/*01e0*/ ISETP.GT.AND P0, PT, R17, RZ, PT ; /* 0x000000ff1100720c */
/* 0x004fda0003f04270 */
/*01f0*/ @!P0 MOV R21, RZ ; /* 0x000000ff00158202 */
/* 0x001fe20000000f00 */
/*0200*/ @!P0 BRA 0x6f0 ; /* 0x000004e000008947 */
/* 0x000fea0003800000 */
/*0210*/ IADD3 R4, R17.reuse, -0x1, RZ ; /* 0xffffffff11047810 */
/* 0x040fe20007ffe0ff */
/*0220*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fe20008000000 */
/*0230*/ LOP3.LUT R16, R17.reuse, 0x3, RZ, 0xc0, !PT ; /* 0x0000000311107812 */
/* 0x040fe200078ec0ff */
/*0240*/ IMAD R18, R17, R15, RZ ; /* 0x0000000f11127224 */
/* 0x000fe200078e02ff */
/*0250*/ ISETP.GE.U32.AND P1, PT, R4, 0x3, PT ; /* 0x000000030400780c */
/* 0x000fe20003f26070 */
/*0260*/ IMAD.MOV.U32 R21, RZ, RZ, RZ ; /* 0x000000ffff157224 */
/* 0x000fe200078e00ff */
/*0270*/ ISETP.NE.AND P0, PT, R16, RZ, PT ; /* 0x000000ff1000720c */
/* 0x000fd60003f05270 */
/*0280*/ @!P1 BRA 0x530 ; /* 0x000002a000009947 */
/* 0x000fea0003800000 */
/*0290*/ HFMA2.MMA R19, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff137435 */
/* 0x000fe200000001ff */
/*02a0*/ IMAD.IADD R22, R17.reuse, 0x1, R14.reuse ; /* 0x0000000111167824 */
/* 0x141fe200078e020e */
/*02b0*/ LEA R20, R17.reuse, R14.reuse, 0x1 ; /* 0x0000000e11147211 */
/* 0x0c0fe200078e08ff */
/*02c0*/ IMAD R24, R17, 0x3, R14 ; /* 0x0000000311187824 */
/* 0x000fe200078e020e */
/*02d0*/ MOV R26, R14 ; /* 0x0000000e001a7202 */
/* 0x000fe20000000f00 */
/*02e0*/ IMAD.IADD R27, R16, 0x1, -R17 ; /* 0x00000001101b7824 */
/* 0x000fe200078e0a11 */
/*02f0*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fc80008000000 */
/*0300*/ IMAD.WIDE R4, R18, R19, c[0x0][0x160] ; /* 0x0000580012047625 */
/* 0x000fc800078e0213 */
/*0310*/ IMAD.WIDE.U32 R6, R26, R19.reuse, c[0x0][0x168] ; /* 0x00005a001a067625 */
/* 0x080fe200078e0013 */
/*0320*/ LDG.E R23, [R4.64] ; /* 0x0000000604177981 */
/* 0x0010a6000c1e1900 */
/*0330*/ IMAD.WIDE.U32 R8, R22, R19.reuse, c[0x0][0x168] ; /* 0x00005a0016087625 */
/* 0x080fe200078e0013 */
/*0340*/ LDG.E R25, [R4.64+0x4] ; /* 0x0000040604197981 */
/* 0x0000e8000c1e1900 */
/*0350*/ LDG.E R6, [R6.64] ; /* 0x0000000606067981 */
/* 0x000ea8000c1e1900 */
/*0360*/ LDG.E R8, [R8.64] ; /* 0x0000000608087981 */
/* 0x000ee2000c1e1900 */
/*0370*/ IMAD.WIDE.U32 R10, R20, R19, c[0x0][0x168] ; /* 0x00005a00140a7625 */
/* 0x000fc600078e0013 */
/*0380*/ LDG.E R29, [R4.64+0x8] ; /* 0x00000806041d7981 */
/* 0x000128000c1e1900 */
/*0390*/ LDG.E R10, [R10.64] ; /* 0x000000060a0a7981 */
/* 0x000f22000c1e1900 */
/*03a0*/ IMAD.WIDE.U32 R12, R24, R19, c[0x0][0x168] ; /* 0x00005a00180c7625 */
/* 0x000fc600078e0013 */
/*03b0*/ LDG.E R28, [R4.64+0xc] ; /* 0x00000c06041c7981 */
/* 0x000168000c1e1900 */
/*03c0*/ LDG.E R12, [R12.64] ; /* 0x000000060c0c7981 */
/* 0x000f62000c1e1900 */
/*03d0*/ I2F R21, R21 ; /* 0x0000001500157306 */
/* 0x002ea20000201400 */
/*03e0*/ UIADD3 UR4, UR4, 0x4, URZ ; /* 0x0000000404047890 */
/* 0x000fe2000fffe03f */
/*03f0*/ IADD3 R4, P2, R4, 0x10, RZ ; /* 0x0000001004047810 */
/* 0x001fe20007f5e0ff */
/*0400*/ IMAD R24, R17.reuse, 0x4, R24 ; /* 0x0000000411187824 */
/* 0x040fe200078e0218 */
/*0410*/ LEA R22, R17.reuse, R22, 0x2 ; /* 0x0000001611167211 */
/* 0x040fe400078e10ff */
/*0420*/ LEA R20, R17.reuse, R20, 0x2 ; /* 0x0000001411147211 */
/* 0x040fe200078e10ff */
/*0430*/ IMAD.X R5, RZ, RZ, R5, P2 ; /* 0x000000ffff057224 */
/* 0x000fe200010e0605 */
/*0440*/ LEA R26, R17, R26, 0x2 ; /* 0x0000001a111a7211 */
/* 0x000fe200078e10ff */
/*0450*/ FFMA R6, R6, R23, R21 ; /* 0x0000001706067223 */
/* 0x004fcc0000000015 */
/*0460*/ F2I.TRUNC.NTZ R6, R6 ; /* 0x0000000600067305 */
/* 0x000e30000020f100 */
/*0470*/ I2F R7, R6 ; /* 0x0000000600077306 */
/* 0x001ee40000201400 */
/*0480*/ FFMA R7, R8, R25, R7 ; /* 0x0000001908077223 */
/* 0x008fcc0000000007 */
/*0490*/ F2I.TRUNC.NTZ R7, R7 ; /* 0x0000000700077305 */
/* 0x000e30000020f100 */
/*04a0*/ I2F R8, R7 ; /* 0x0000000700087306 */
/* 0x001f240000201400 */
/*04b0*/ FFMA R8, R10, R29, R8 ; /* 0x0000001d0a087223 */
/* 0x010fcc0000000008 */
/*04c0*/ F2I.TRUNC.NTZ R8, R8 ; /* 0x0000000800087305 */
/* 0x000e22000020f100 */
/*04d0*/ IADD3 R10, R27, UR4, RZ ; /* 0x000000041b0a7c10 */
/* 0x000fce000fffe0ff */
/*04e0*/ I2F R9, R8 ; /* 0x0000000800097306 */
/* 0x001f620000201400 */
/*04f0*/ ISETP.NE.AND P1, PT, R10, RZ, PT ; /* 0x000000ff0a00720c */
/* 0x000fe20003f25270 */
/*0500*/ FFMA R9, R12, R28, R9 ; /* 0x0000001c0c097223 */
/* 0x020fcc0000000009 */
/*0510*/ F2I.TRUNC.NTZ R21, R9 ; /* 0x0000000900157305 */
/* 0x00006c000020f100 */
/*0520*/ @P1 BRA 0x310 ; /* 0xfffffde000001947 */
/* 0x000fea000383ffff */
/*0530*/ @!P0 BRA 0x6f0 ; /* 0x000001b000008947 */
/* 0x000fea0003800000 */
/*0540*/ HFMA2.MMA R13, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0d7435 */
/* 0x000fe200000001ff */
/*0550*/ IADD3 R4, R18, UR4, RZ ; /* 0x0000000412047c10 */
/* 0x000fe2000fffe0ff */
/*0560*/ IMAD R8, R17, UR4, R14 ; /* 0x0000000411087c24 */
/* 0x001fd0000f8e020e */
/*0570*/ IMAD.WIDE.U32 R6, R8, R13, c[0x0][0x168] ; /* 0x00005a0008067625 */
/* 0x000fc800078e000d */
/*0580*/ IMAD.WIDE R4, R4, R13, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fe400078e020d */
/*0590*/ LDG.E R6, [R6.64] ; /* 0x0000000606067981 */
/* 0x000ea8000c1e1900 */
/*05a0*/ LDG.E R9, [R4.64] ; /* 0x0000000604097981 */
/* 0x000ea2000c1e1900 */
/*05b0*/ I2F R21, R21 ; /* 0x0000001500157306 */
/* 0x002ea20000201400 */
/*05c0*/ ISETP.NE.AND P0, PT, R16, 0x1, PT ; /* 0x000000011000780c */
/* 0x000fe20003f05270 */
/*05d0*/ FFMA R9, R6, R9, R21 ; /* 0x0000000906097223 */
/* 0x004fcc0000000015 */
/*05e0*/ F2I.TRUNC.NTZ R21, R9 ; /* 0x0000000900157305 */
/* 0x00006c000020f100 */
/*05f0*/ @!P0 BRA 0x6f0 ; /* 0x000000f000008947 */
/* 0x000fea0003800000 */
/*0600*/ IMAD.IADD R8, R17, 0x1, R8 ; /* 0x0000000111087824 */
/* 0x000fe200078e0208 */
/*0610*/ LDG.E R11, [R4.64+0x4] ; /* 0x00000406040b7981 */
/* 0x000ea6000c1e1900 */
/*0620*/ IMAD.WIDE.U32 R6, R8, R13, c[0x0][0x168] ; /* 0x00005a0008067625 */
/* 0x000fcc00078e000d */
/*0630*/ LDG.E R6, [R6.64] ; /* 0x0000000606067981 */
/* 0x000ea2000c1e1900 */
/*0640*/ ISETP.NE.AND P0, PT, R16, 0x2, PT ; /* 0x000000021000780c */
/* 0x000fda0003f05270 */
/*0650*/ @P0 IADD3 R8, R17, R8, RZ ; /* 0x0000000811080210 */
/* 0x000fca0007ffe0ff */
/*0660*/ @P0 IMAD.WIDE.U32 R8, R8, R13, c[0x0][0x168] ; /* 0x00005a0008080625 */
/* 0x001fe400078e000d */
/*0670*/ @P0 LDG.E R13, [R4.64+0x8] ; /* 0x00000806040d0981 */
/* 0x000ee8000c1e1900 */
/*0680*/ @P0 LDG.E R8, [R8.64] ; /* 0x0000000608080981 */
/* 0x000ee2000c1e1900 */
/*0690*/ I2F R21, R21 ; /* 0x0000001500157306 */
/* 0x002ea40000201400 */
/*06a0*/ FFMA R11, R6, R11, R21 ; /* 0x0000000b060b7223 */
/* 0x004fcc0000000015 */
/*06b0*/ F2I.TRUNC.NTZ R21, R11 ; /* 0x0000000b00157305 */
/* 0x000e30000020f100 */
/*06c0*/ @P0 I2F R10, R21 ; /* 0x00000015000a0306 */
/* 0x001ee40000201400 */
/*06d0*/ @P0 FFMA R13, R8, R13, R10 ; /* 0x0000000d080d0223 */
/* 0x008fcc000000000a */
/*06e0*/ @P0 F2I.TRUNC.NTZ R21, R13 ; /* 0x0000000d00150305 */
/* 0x000064000020f100 */
/*06f0*/ IMAD R4, R17, R15, R14 ; /* 0x0000000f11047224 */
/* 0x001fe200078e020e */
/*0700*/ I2F R21, R21 ; /* 0x0000001500157306 */
/* 0x002e220000201400 */
/*0710*/ IADD3 R15, R15, 0x1, RZ ; /* 0x000000010f0f7810 */
/* 0x000fe40007ffe0ff */
/*0720*/ MOV R5, 0x4 ; /* 0x0000000400057802 */
/* 0x000fe40000000f00 */
/*0730*/ ISETP.GE.U32.AND P0, PT, R15, R0, PT ; /* 0x000000000f00720c */
/* 0x000fc60003f06070 */
/*0740*/ IMAD.WIDE.U32 R4, R4, R5, c[0x0][0x170] ; /* 0x00005c0004047625 */
/* 0x000fca00078e0005 */
/*0750*/ STG.E [R4.64], R21 ; /* 0x0000001504007986 */
/* 0x0011ea000c101906 */
/*0760*/ @!P0 BRA 0x1d0 ; /* 0xfffffa6000008947 */
/* 0x000fea000383ffff */
/*0770*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0780*/ BRA 0x780; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0790*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*07a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*07b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*07c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*07d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*07e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*07f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0800*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0810*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0820*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0830*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0840*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0850*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0860*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0870*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z15MatrixMulDevicePfS_S_Pi
.globl _Z15MatrixMulDevicePfS_S_Pi
.p2align 8
.type _Z15MatrixMulDevicePfS_S_Pi,@function
_Z15MatrixMulDevicePfS_S_Pi:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x20
s_load_b64 s[2:3], s[0:1], 0x18
s_waitcnt lgkmcnt(0)
v_cvt_f32_u32_e32 v1, s4
s_load_b32 s12, s[2:3], 0x0
s_sub_i32 s3, 0, s4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v1, v1
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v1, 0x4f7ffffe, v1
v_cvt_u32_f32_e32 v1, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_readfirstlane_b32 s2, v1
s_mul_i32 s3, s3, s2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_hi_u32 s3, s2, s3
s_add_i32 s2, s2, s3
s_waitcnt lgkmcnt(0)
s_mul_hi_u32 s2, s12, s2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
s_mul_i32 s3, s2, s4
s_add_i32 s5, s2, 1
s_sub_i32 s3, s12, s3
s_sub_i32 s6, s3, s4
s_cmp_ge_u32 s3, s4
s_cselect_b32 s2, s5, s2
s_cselect_b32 s3, s6, s3
s_add_i32 s5, s2, 1
s_cmp_ge_u32 s3, s4
s_cselect_b32 s8, s5, s2
s_add_i32 s2, s15, 1
s_mul_i32 s13, s8, s15
s_mul_i32 s14, s8, s2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s14, s14, -1
s_cmp_ge_u32 s13, s14
s_cbranch_scc1 .LBB0_6
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x10
s_cmp_gt_i32 s12, 0
v_mov_b32_e32 v2, 0
s_cselect_b32 s0, -1, 0
s_mul_i32 s1, s8, s15
v_cndmask_b32_e64 v1, 0, 1, s0
s_mul_i32 s8, s1, s12
s_delay_alu instid0(VALU_DEP_1)
v_cmp_ne_u32_e64 s0, 1, v1
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_3
.p2align 6
.LBB0_2:
v_mad_u64_u32 v[4:5], null, s13, s12, v[0:1]
v_mov_b32_e32 v5, v2
v_cvt_f32_i32_e32 v1, v3
s_add_i32 s13, s13, 1
s_add_i32 s8, s8, s12
s_cmp_lt_u32 s13, s14
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, s2, v4
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v5, vcc_lo
global_store_b32 v[3:4], v1, off
s_cbranch_scc0 .LBB0_6
.LBB0_3:
v_mov_b32_e32 v3, v2
s_delay_alu instid0(VALU_DEP_2)
s_and_b32 vcc_lo, exec_lo, s0
s_cbranch_vccnz .LBB0_2
s_ashr_i32 s9, s8, 31
v_mov_b32_e32 v3, 0
s_lshl_b64 s[10:11], s[8:9], 2
v_mov_b32_e32 v1, v0
s_waitcnt lgkmcnt(0)
s_add_u32 s10, s4, s10
s_addc_u32 s11, s5, s11
s_mov_b32 s1, s12
.p2align 6
.LBB0_5:
v_lshlrev_b64 v[4:5], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_cvt_f32_i32_e32 v3, v3
s_add_i32 s1, s1, -1
v_add_nc_u32_e32 v1, s12, v1
v_add_co_u32 v4, vcc_lo, s6, v4
s_delay_alu instid0(VALU_DEP_4)
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v5, vcc_lo
global_load_b32 v6, v2, s[10:11]
global_load_b32 v4, v[4:5], off
s_add_u32 s10, s10, 4
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s1, 0
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v3, v6, v4
s_delay_alu instid0(VALU_DEP_1)
v_cvt_i32_f32_e32 v3, v3
s_cbranch_scc1 .LBB0_5
s_branch .LBB0_2
.LBB0_6:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z15MatrixMulDevicePfS_S_Pi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z15MatrixMulDevicePfS_S_Pi, .Lfunc_end0-_Z15MatrixMulDevicePfS_S_Pi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z15MatrixMulDevicePfS_S_Pi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z15MatrixMulDevicePfS_S_Pi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00003130_00000000-6_MatrixMulDevice.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z41__device_stub__Z15MatrixMulDevicePfS_S_PiPfS_S_Pi
.type _Z41__device_stub__Z15MatrixMulDevicePfS_S_PiPfS_S_Pi, @function
_Z41__device_stub__Z15MatrixMulDevicePfS_S_PiPfS_S_Pi:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %rcx, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z15MatrixMulDevicePfS_S_Pi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z41__device_stub__Z15MatrixMulDevicePfS_S_PiPfS_S_Pi, .-_Z41__device_stub__Z15MatrixMulDevicePfS_S_PiPfS_S_Pi
.globl _Z15MatrixMulDevicePfS_S_Pi
.type _Z15MatrixMulDevicePfS_S_Pi, @function
_Z15MatrixMulDevicePfS_S_Pi:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z41__device_stub__Z15MatrixMulDevicePfS_S_PiPfS_S_Pi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z15MatrixMulDevicePfS_S_Pi, .-_Z15MatrixMulDevicePfS_S_Pi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z15MatrixMulDevicePfS_S_Pi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z15MatrixMulDevicePfS_S_Pi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "MatrixMulDevice.hip"
.globl _Z30__device_stub__MatrixMulDevicePfS_S_Pi # -- Begin function _Z30__device_stub__MatrixMulDevicePfS_S_Pi
.p2align 4, 0x90
.type _Z30__device_stub__MatrixMulDevicePfS_S_Pi,@function
_Z30__device_stub__MatrixMulDevicePfS_S_Pi: # @_Z30__device_stub__MatrixMulDevicePfS_S_Pi
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movq %rcx, 48(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rax
movq %rax, 104(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z15MatrixMulDevicePfS_S_Pi, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z30__device_stub__MatrixMulDevicePfS_S_Pi, .Lfunc_end0-_Z30__device_stub__MatrixMulDevicePfS_S_Pi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z15MatrixMulDevicePfS_S_Pi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z15MatrixMulDevicePfS_S_Pi,@object # @_Z15MatrixMulDevicePfS_S_Pi
.section .rodata,"a",@progbits
.globl _Z15MatrixMulDevicePfS_S_Pi
.p2align 3, 0x0
_Z15MatrixMulDevicePfS_S_Pi:
.quad _Z30__device_stub__MatrixMulDevicePfS_S_Pi
.size _Z15MatrixMulDevicePfS_S_Pi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z15MatrixMulDevicePfS_S_Pi"
.size .L__unnamed_1, 28
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z30__device_stub__MatrixMulDevicePfS_S_Pi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z15MatrixMulDevicePfS_S_Pi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
//const int N = 16;
const int blocksize = 16;
__global__
void hello(int *a, int *b)
{
int z = blockDim.x * blockIdx.x + threadIdx.x;
a[z] = b[z]* b[z] * b[z];
//a[threadIdx.x] = 5;
}
int main(int argc, char** argv)
{
int SIZE = atoi(argv[1]);
int *a =(int*) malloc(SIZE * sizeof(int));
int *b =(int*) malloc(SIZE * sizeof(int));
for(int i=0; i<SIZE; i++) {
a[i]=0;
b[i]=rand()%100+1;
}
// int a[N] = {};
//int b[N] = {16, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
int *ad;
int *bd;
const int csize = SIZE*sizeof(int);
const int isize = SIZE*sizeof(int);
printf("input ");
for (int i=0;i<SIZE;i++)
printf("%d ", b[i]);
printf("\n");
cudaMalloc( (void**)&ad, csize );
cudaMalloc( (void**)&bd, isize );
cudaMemcpy( ad, a, csize, cudaMemcpyHostToDevice );
cudaMemcpy( bd, b, isize, cudaMemcpyHostToDevice );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
//hello<<<dimGrid, dimBlock>>>(ad, bd);
int cores = 512;
//int blocks = SIZE/cores + 16;
int blocks = 16;
int threadsPerBlock = 256;
//int blocksPerGrid =(SIZE + threadsPerBlock – 1) / threadsPerBlock;
int blocksPerGrid = ( SIZE + threadsPerBlock -1) / threadsPerBlock;
printf("blocks = %d cores = %d\n", blocks, cores);
hello<<<blocksPerGrid, threadsPerBlock>>>(ad, bd);
cudaMemcpy( a, ad, csize, cudaMemcpyDeviceToHost );
cudaFree( ad );
cudaFree( bd );
printf("ouput ");
for (int i=0;i<SIZE;i++)
printf("%d ", a[i]);
printf("\n");
//printf("%s\n", a);
free(a);
free(b);
return EXIT_SUCCESS;
} | code for sm_80
Function : _Z5helloPiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fca00078e0203 */
/*0060*/ IMAD.WIDE R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x000fcc00078e0205 */
/*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fc800078e0205 */
/*0090*/ IMAD R7, R2, R2, RZ ; /* 0x0000000202077224 */
/* 0x004fc800078e02ff */
/*00a0*/ IMAD R7, R2, R7, RZ ; /* 0x0000000702077224 */
/* 0x000fca00078e02ff */
/*00b0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*00c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
//const int N = 16;
const int blocksize = 16;
__global__
void hello(int *a, int *b)
{
int z = blockDim.x * blockIdx.x + threadIdx.x;
a[z] = b[z]* b[z] * b[z];
//a[threadIdx.x] = 5;
}
int main(int argc, char** argv)
{
int SIZE = atoi(argv[1]);
int *a =(int*) malloc(SIZE * sizeof(int));
int *b =(int*) malloc(SIZE * sizeof(int));
for(int i=0; i<SIZE; i++) {
a[i]=0;
b[i]=rand()%100+1;
}
// int a[N] = {};
//int b[N] = {16, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
int *ad;
int *bd;
const int csize = SIZE*sizeof(int);
const int isize = SIZE*sizeof(int);
printf("input ");
for (int i=0;i<SIZE;i++)
printf("%d ", b[i]);
printf("\n");
cudaMalloc( (void**)&ad, csize );
cudaMalloc( (void**)&bd, isize );
cudaMemcpy( ad, a, csize, cudaMemcpyHostToDevice );
cudaMemcpy( bd, b, isize, cudaMemcpyHostToDevice );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
//hello<<<dimGrid, dimBlock>>>(ad, bd);
int cores = 512;
//int blocks = SIZE/cores + 16;
int blocks = 16;
int threadsPerBlock = 256;
//int blocksPerGrid =(SIZE + threadsPerBlock – 1) / threadsPerBlock;
int blocksPerGrid = ( SIZE + threadsPerBlock -1) / threadsPerBlock;
printf("blocks = %d cores = %d\n", blocks, cores);
hello<<<blocksPerGrid, threadsPerBlock>>>(ad, bd);
cudaMemcpy( a, ad, csize, cudaMemcpyDeviceToHost );
cudaFree( ad );
cudaFree( bd );
printf("ouput ");
for (int i=0;i<SIZE;i++)
printf("%d ", a[i]);
printf("\n");
//printf("%s\n", a);
free(a);
free(b);
return EXIT_SUCCESS;
} | .file "tmpxft_001a7480_00000000-6_hello.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z26__device_stub__Z5helloPiS_PiS_
.type _Z26__device_stub__Z5helloPiS_PiS_, @function
_Z26__device_stub__Z5helloPiS_PiS_:
.LFB2082:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z5helloPiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z26__device_stub__Z5helloPiS_PiS_, .-_Z26__device_stub__Z5helloPiS_PiS_
.globl _Z5helloPiS_
.type _Z5helloPiS_, @function
_Z5helloPiS_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z5helloPiS_PiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z5helloPiS_, .-_Z5helloPiS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "input "
.LC1:
.string "%d "
.LC2:
.string "\n"
.LC3:
.string "blocks = %d cores = %d\n"
.LC4:
.string "ouput "
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r13
movslq %eax, %r15
leaq 0(,%r15,4), %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, %rbp
movq %rbx, %rdi
call malloc@PLT
movq %rax, %r12
testl %r13d, %r13d
jle .L12
leal -1(%r13), %r14d
movl $0, %ebx
.L13:
movl $0, 0(%rbp,%rbx,4)
call rand@PLT
movslq %eax, %rdx
imulq $1374389535, %rdx, %rdx
sarq $37, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
imull $100, %edx, %edx
subl %edx, %eax
addl $1, %eax
movl %eax, (%r12,%rbx,4)
movq %rbx, %rax
addq $1, %rbx
cmpq %r14, %rax
jne .L13
leal 0(,%r15,4), %eax
movl %eax, 12(%rsp)
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %r12, %rbx
leaq 4(%r12,%r14,4), %r15
leaq .LC1(%rip), %r14
.L14:
movl (%rbx), %edx
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r15, %rbx
jne .L14
.L18:
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movslq 12(%rsp), %rbx
leaq 16(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %rbp, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r12, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $512, %ecx
movl $16, %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $256, 44(%rsp)
movl $1, 48(%rsp)
leal 510(%r13), %eax
movl %r13d, %edx
addl $255, %edx
cmovns %edx, %eax
sarl $8, %eax
movl %eax, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L25
.L15:
movl $2, %ecx
movq %rbx, %rdx
movq 16(%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
testl %r13d, %r13d
jle .L16
movq %rbp, %rbx
leal -1(%r13), %eax
leaq 4(%rbp,%rax,4), %r14
leaq .LC1(%rip), %r13
.L17:
movl (%rbx), %edx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r14, %rbx
jne .L17
.L16:
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbp, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L26
movl $0, %eax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z26__device_stub__Z5helloPiS_PiS_
jmp .L15
.L12:
leal 0(,%r15,4), %eax
movl %eax, 12(%rsp)
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L18
.L26:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC5:
.string "_Z5helloPiS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z5helloPiS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
//const int N = 16;
const int blocksize = 16;
__global__
void hello(int *a, int *b)
{
int z = blockDim.x * blockIdx.x + threadIdx.x;
a[z] = b[z]* b[z] * b[z];
//a[threadIdx.x] = 5;
}
int main(int argc, char** argv)
{
int SIZE = atoi(argv[1]);
int *a =(int*) malloc(SIZE * sizeof(int));
int *b =(int*) malloc(SIZE * sizeof(int));
for(int i=0; i<SIZE; i++) {
a[i]=0;
b[i]=rand()%100+1;
}
// int a[N] = {};
//int b[N] = {16, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
int *ad;
int *bd;
const int csize = SIZE*sizeof(int);
const int isize = SIZE*sizeof(int);
printf("input ");
for (int i=0;i<SIZE;i++)
printf("%d ", b[i]);
printf("\n");
cudaMalloc( (void**)&ad, csize );
cudaMalloc( (void**)&bd, isize );
cudaMemcpy( ad, a, csize, cudaMemcpyHostToDevice );
cudaMemcpy( bd, b, isize, cudaMemcpyHostToDevice );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
//hello<<<dimGrid, dimBlock>>>(ad, bd);
int cores = 512;
//int blocks = SIZE/cores + 16;
int blocks = 16;
int threadsPerBlock = 256;
//int blocksPerGrid =(SIZE + threadsPerBlock – 1) / threadsPerBlock;
int blocksPerGrid = ( SIZE + threadsPerBlock -1) / threadsPerBlock;
printf("blocks = %d cores = %d\n", blocks, cores);
hello<<<blocksPerGrid, threadsPerBlock>>>(ad, bd);
cudaMemcpy( a, ad, csize, cudaMemcpyDeviceToHost );
cudaFree( ad );
cudaFree( bd );
printf("ouput ");
for (int i=0;i<SIZE;i++)
printf("%d ", a[i]);
printf("\n");
//printf("%s\n", a);
free(a);
free(b);
return EXIT_SUCCESS;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
//const int N = 16;
const int blocksize = 16;
__global__
void hello(int *a, int *b)
{
int z = blockDim.x * blockIdx.x + threadIdx.x;
a[z] = b[z]* b[z] * b[z];
//a[threadIdx.x] = 5;
}
int main(int argc, char** argv)
{
int SIZE = atoi(argv[1]);
int *a =(int*) malloc(SIZE * sizeof(int));
int *b =(int*) malloc(SIZE * sizeof(int));
for(int i=0; i<SIZE; i++) {
a[i]=0;
b[i]=rand()%100+1;
}
// int a[N] = {};
//int b[N] = {16, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
int *ad;
int *bd;
const int csize = SIZE*sizeof(int);
const int isize = SIZE*sizeof(int);
printf("input ");
for (int i=0;i<SIZE;i++)
printf("%d ", b[i]);
printf("\n");
hipMalloc( (void**)&ad, csize );
hipMalloc( (void**)&bd, isize );
hipMemcpy( ad, a, csize, hipMemcpyHostToDevice );
hipMemcpy( bd, b, isize, hipMemcpyHostToDevice );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
//hello<<<dimGrid, dimBlock>>>(ad, bd);
int cores = 512;
//int blocks = SIZE/cores + 16;
int blocks = 16;
int threadsPerBlock = 256;
//int blocksPerGrid =(SIZE + threadsPerBlock – 1) / threadsPerBlock;
int blocksPerGrid = ( SIZE + threadsPerBlock -1) / threadsPerBlock;
printf("blocks = %d cores = %d\n", blocks, cores);
hello<<<blocksPerGrid, threadsPerBlock>>>(ad, bd);
hipMemcpy( a, ad, csize, hipMemcpyDeviceToHost );
hipFree( ad );
hipFree( bd );
printf("ouput ");
for (int i=0;i<SIZE;i++)
printf("%d ", a[i]);
printf("\n");
//printf("%s\n", a);
free(a);
free(b);
return EXIT_SUCCESS;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
//const int N = 16;
const int blocksize = 16;
__global__
void hello(int *a, int *b)
{
int z = blockDim.x * blockIdx.x + threadIdx.x;
a[z] = b[z]* b[z] * b[z];
//a[threadIdx.x] = 5;
}
int main(int argc, char** argv)
{
int SIZE = atoi(argv[1]);
int *a =(int*) malloc(SIZE * sizeof(int));
int *b =(int*) malloc(SIZE * sizeof(int));
for(int i=0; i<SIZE; i++) {
a[i]=0;
b[i]=rand()%100+1;
}
// int a[N] = {};
//int b[N] = {16, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
int *ad;
int *bd;
const int csize = SIZE*sizeof(int);
const int isize = SIZE*sizeof(int);
printf("input ");
for (int i=0;i<SIZE;i++)
printf("%d ", b[i]);
printf("\n");
hipMalloc( (void**)&ad, csize );
hipMalloc( (void**)&bd, isize );
hipMemcpy( ad, a, csize, hipMemcpyHostToDevice );
hipMemcpy( bd, b, isize, hipMemcpyHostToDevice );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
//hello<<<dimGrid, dimBlock>>>(ad, bd);
int cores = 512;
//int blocks = SIZE/cores + 16;
int blocks = 16;
int threadsPerBlock = 256;
//int blocksPerGrid =(SIZE + threadsPerBlock – 1) / threadsPerBlock;
int blocksPerGrid = ( SIZE + threadsPerBlock -1) / threadsPerBlock;
printf("blocks = %d cores = %d\n", blocks, cores);
hello<<<blocksPerGrid, threadsPerBlock>>>(ad, bd);
hipMemcpy( a, ad, csize, hipMemcpyDeviceToHost );
hipFree( ad );
hipFree( bd );
printf("ouput ");
for (int i=0;i<SIZE;i++)
printf("%d ", a[i]);
printf("\n");
//printf("%s\n", a);
free(a);
free(b);
return EXIT_SUCCESS;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z5helloPiS_
.globl _Z5helloPiS_
.p2align 8
.type _Z5helloPiS_,@function
_Z5helloPiS_:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x1c
s_load_b128 s[0:3], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
v_mul_lo_u32 v3, v2, v2
s_delay_alu instid0(VALU_DEP_1)
v_mul_lo_u32 v2, v3, v2
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z5helloPiS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z5helloPiS_, .Lfunc_end0-_Z5helloPiS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z5helloPiS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z5helloPiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
//const int N = 16;
const int blocksize = 16;
__global__
void hello(int *a, int *b)
{
int z = blockDim.x * blockIdx.x + threadIdx.x;
a[z] = b[z]* b[z] * b[z];
//a[threadIdx.x] = 5;
}
int main(int argc, char** argv)
{
int SIZE = atoi(argv[1]);
int *a =(int*) malloc(SIZE * sizeof(int));
int *b =(int*) malloc(SIZE * sizeof(int));
for(int i=0; i<SIZE; i++) {
a[i]=0;
b[i]=rand()%100+1;
}
// int a[N] = {};
//int b[N] = {16, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
int *ad;
int *bd;
const int csize = SIZE*sizeof(int);
const int isize = SIZE*sizeof(int);
printf("input ");
for (int i=0;i<SIZE;i++)
printf("%d ", b[i]);
printf("\n");
hipMalloc( (void**)&ad, csize );
hipMalloc( (void**)&bd, isize );
hipMemcpy( ad, a, csize, hipMemcpyHostToDevice );
hipMemcpy( bd, b, isize, hipMemcpyHostToDevice );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
//hello<<<dimGrid, dimBlock>>>(ad, bd);
int cores = 512;
//int blocks = SIZE/cores + 16;
int blocks = 16;
int threadsPerBlock = 256;
//int blocksPerGrid =(SIZE + threadsPerBlock – 1) / threadsPerBlock;
int blocksPerGrid = ( SIZE + threadsPerBlock -1) / threadsPerBlock;
printf("blocks = %d cores = %d\n", blocks, cores);
hello<<<blocksPerGrid, threadsPerBlock>>>(ad, bd);
hipMemcpy( a, ad, csize, hipMemcpyDeviceToHost );
hipFree( ad );
hipFree( bd );
printf("ouput ");
for (int i=0;i<SIZE;i++)
printf("%d ", a[i]);
printf("\n");
//printf("%s\n", a);
free(a);
free(b);
return EXIT_SUCCESS;
} | .text
.file "hello.hip"
.globl _Z20__device_stub__helloPiS_ # -- Begin function _Z20__device_stub__helloPiS_
.p2align 4, 0x90
.type _Z20__device_stub__helloPiS_,@function
_Z20__device_stub__helloPiS_: # @_Z20__device_stub__helloPiS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z5helloPiS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z20__device_stub__helloPiS_, .Lfunc_end0-_Z20__device_stub__helloPiS_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $96, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq 8(%rsi), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r15
movslq %r15d, %r12
leaq (,%r12,4), %r14
movq %r14, %rdi
callq malloc
movq %rax, %rbx
movq %r14, %rdi
callq malloc
movq %rax, %r14
testl %r12d, %r12d
jle .LBB1_3
# %bb.1: # %.lr.ph.preheader
movl %r15d, %r12d
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl $0, (%rbx,%r13,4)
callq rand
cltq
imulq $1374389535, %rax, %rcx # imm = 0x51EB851F
movq %rcx, %rdx
shrq $63, %rdx
sarq $37, %rcx
addl %edx, %ecx
imull $100, %ecx, %ecx
negl %ecx
addl %ecx, %eax
incl %eax
movl %eax, (%r14,%r13,4)
incq %r13
cmpq %r13, %r12
jne .LBB1_2
.LBB1_3: # %._crit_edge
movl $.L.str, %edi
xorl %eax, %eax
callq printf
testl %r15d, %r15d
jle .LBB1_6
# %bb.4: # %.lr.ph56.preheader
movl %r15d, %r12d
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB1_5: # %.lr.ph56
# =>This Inner Loop Header: Depth=1
movl (%r14,%r13,4), %esi
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
incq %r13
cmpq %r13, %r12
jne .LBB1_5
.LBB1_6: # %._crit_edge57
movl $10, %edi
callq putchar@PLT
movq %r15, %r12
shlq $34, %r12
sarq $32, %r12
movq %rsp, %rdi
movq %r12, %rsi
callq hipMalloc
leaq 8(%rsp), %rdi
movq %r12, %rsi
callq hipMalloc
movq (%rsp), %rdi
movq %rbx, %rsi
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movq %r14, %rsi
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
leal 255(%r15), %eax
leal 510(%r15), %r13d
testl %eax, %eax
cmovnsl %eax, %r13d
sarl $8, %r13d
movl $.L.str.3, %edi
movl $16, %esi
movl $512, %edx # imm = 0x200
xorl %eax, %eax
callq printf
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %r13
orq $256, %rdx # imm = 0x100
movq %r13, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_8
# %bb.7:
movq (%rsp), %rax
movq 8(%rsp), %rcx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z5helloPiS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_8:
movq (%rsp), %rsi
movq %rbx, %rdi
movq %r12, %rdx
movl $2, %ecx
callq hipMemcpy
movq (%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movl $.L.str.4, %edi
xorl %eax, %eax
callq printf
testl %r15d, %r15d
jle .LBB1_11
# %bb.9: # %.lr.ph60.preheader
movl %r15d, %r15d
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB1_10: # %.lr.ph60
# =>This Inner Loop Header: Depth=1
movl (%rbx,%r12,4), %esi
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
incq %r12
cmpq %r12, %r15
jne .LBB1_10
.LBB1_11: # %._crit_edge61
movl $10, %edi
callq putchar@PLT
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
xorl %eax, %eax
addq $96, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z5helloPiS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z5helloPiS_,@object # @_Z5helloPiS_
.section .rodata,"a",@progbits
.globl _Z5helloPiS_
.p2align 3, 0x0
_Z5helloPiS_:
.quad _Z20__device_stub__helloPiS_
.size _Z5helloPiS_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "input "
.size .L.str, 7
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "%d "
.size .L.str.1, 4
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "blocks = %d cores = %d\n"
.size .L.str.3, 24
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "ouput "
.size .L.str.4, 7
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z5helloPiS_"
.size .L__unnamed_1, 13
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z20__device_stub__helloPiS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z5helloPiS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z5helloPiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fca00078e0203 */
/*0060*/ IMAD.WIDE R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x000fcc00078e0205 */
/*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fc800078e0205 */
/*0090*/ IMAD R7, R2, R2, RZ ; /* 0x0000000202077224 */
/* 0x004fc800078e02ff */
/*00a0*/ IMAD R7, R2, R7, RZ ; /* 0x0000000702077224 */
/* 0x000fca00078e02ff */
/*00b0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*00c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z5helloPiS_
.globl _Z5helloPiS_
.p2align 8
.type _Z5helloPiS_,@function
_Z5helloPiS_:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x1c
s_load_b128 s[0:3], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
v_mul_lo_u32 v3, v2, v2
s_delay_alu instid0(VALU_DEP_1)
v_mul_lo_u32 v2, v3, v2
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z5helloPiS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z5helloPiS_, .Lfunc_end0-_Z5helloPiS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z5helloPiS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z5helloPiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001a7480_00000000-6_hello.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z26__device_stub__Z5helloPiS_PiS_
.type _Z26__device_stub__Z5helloPiS_PiS_, @function
_Z26__device_stub__Z5helloPiS_PiS_:
.LFB2082:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z5helloPiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z26__device_stub__Z5helloPiS_PiS_, .-_Z26__device_stub__Z5helloPiS_PiS_
.globl _Z5helloPiS_
.type _Z5helloPiS_, @function
_Z5helloPiS_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z5helloPiS_PiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z5helloPiS_, .-_Z5helloPiS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "input "
.LC1:
.string "%d "
.LC2:
.string "\n"
.LC3:
.string "blocks = %d cores = %d\n"
.LC4:
.string "ouput "
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r13
movslq %eax, %r15
leaq 0(,%r15,4), %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, %rbp
movq %rbx, %rdi
call malloc@PLT
movq %rax, %r12
testl %r13d, %r13d
jle .L12
leal -1(%r13), %r14d
movl $0, %ebx
.L13:
movl $0, 0(%rbp,%rbx,4)
call rand@PLT
movslq %eax, %rdx
imulq $1374389535, %rdx, %rdx
sarq $37, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
imull $100, %edx, %edx
subl %edx, %eax
addl $1, %eax
movl %eax, (%r12,%rbx,4)
movq %rbx, %rax
addq $1, %rbx
cmpq %r14, %rax
jne .L13
leal 0(,%r15,4), %eax
movl %eax, 12(%rsp)
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %r12, %rbx
leaq 4(%r12,%r14,4), %r15
leaq .LC1(%rip), %r14
.L14:
movl (%rbx), %edx
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r15, %rbx
jne .L14
.L18:
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movslq 12(%rsp), %rbx
leaq 16(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %rbp, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r12, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $512, %ecx
movl $16, %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $256, 44(%rsp)
movl $1, 48(%rsp)
leal 510(%r13), %eax
movl %r13d, %edx
addl $255, %edx
cmovns %edx, %eax
sarl $8, %eax
movl %eax, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L25
.L15:
movl $2, %ecx
movq %rbx, %rdx
movq 16(%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
testl %r13d, %r13d
jle .L16
movq %rbp, %rbx
leal -1(%r13), %eax
leaq 4(%rbp,%rax,4), %r14
leaq .LC1(%rip), %r13
.L17:
movl (%rbx), %edx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r14, %rbx
jne .L17
.L16:
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbp, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L26
movl $0, %eax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z26__device_stub__Z5helloPiS_PiS_
jmp .L15
.L12:
leal 0(,%r15,4), %eax
movl %eax, 12(%rsp)
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L18
.L26:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC5:
.string "_Z5helloPiS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z5helloPiS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "hello.hip"
.globl _Z20__device_stub__helloPiS_ # -- Begin function _Z20__device_stub__helloPiS_
.p2align 4, 0x90
.type _Z20__device_stub__helloPiS_,@function
_Z20__device_stub__helloPiS_: # @_Z20__device_stub__helloPiS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z5helloPiS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z20__device_stub__helloPiS_, .Lfunc_end0-_Z20__device_stub__helloPiS_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $96, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq 8(%rsi), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r15
movslq %r15d, %r12
leaq (,%r12,4), %r14
movq %r14, %rdi
callq malloc
movq %rax, %rbx
movq %r14, %rdi
callq malloc
movq %rax, %r14
testl %r12d, %r12d
jle .LBB1_3
# %bb.1: # %.lr.ph.preheader
movl %r15d, %r12d
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl $0, (%rbx,%r13,4)
callq rand
cltq
imulq $1374389535, %rax, %rcx # imm = 0x51EB851F
movq %rcx, %rdx
shrq $63, %rdx
sarq $37, %rcx
addl %edx, %ecx
imull $100, %ecx, %ecx
negl %ecx
addl %ecx, %eax
incl %eax
movl %eax, (%r14,%r13,4)
incq %r13
cmpq %r13, %r12
jne .LBB1_2
.LBB1_3: # %._crit_edge
movl $.L.str, %edi
xorl %eax, %eax
callq printf
testl %r15d, %r15d
jle .LBB1_6
# %bb.4: # %.lr.ph56.preheader
movl %r15d, %r12d
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB1_5: # %.lr.ph56
# =>This Inner Loop Header: Depth=1
movl (%r14,%r13,4), %esi
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
incq %r13
cmpq %r13, %r12
jne .LBB1_5
.LBB1_6: # %._crit_edge57
movl $10, %edi
callq putchar@PLT
movq %r15, %r12
shlq $34, %r12
sarq $32, %r12
movq %rsp, %rdi
movq %r12, %rsi
callq hipMalloc
leaq 8(%rsp), %rdi
movq %r12, %rsi
callq hipMalloc
movq (%rsp), %rdi
movq %rbx, %rsi
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movq %r14, %rsi
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
leal 255(%r15), %eax
leal 510(%r15), %r13d
testl %eax, %eax
cmovnsl %eax, %r13d
sarl $8, %r13d
movl $.L.str.3, %edi
movl $16, %esi
movl $512, %edx # imm = 0x200
xorl %eax, %eax
callq printf
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %r13
orq $256, %rdx # imm = 0x100
movq %r13, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_8
# %bb.7:
movq (%rsp), %rax
movq 8(%rsp), %rcx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z5helloPiS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_8:
movq (%rsp), %rsi
movq %rbx, %rdi
movq %r12, %rdx
movl $2, %ecx
callq hipMemcpy
movq (%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movl $.L.str.4, %edi
xorl %eax, %eax
callq printf
testl %r15d, %r15d
jle .LBB1_11
# %bb.9: # %.lr.ph60.preheader
movl %r15d, %r15d
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB1_10: # %.lr.ph60
# =>This Inner Loop Header: Depth=1
movl (%rbx,%r12,4), %esi
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
incq %r12
cmpq %r12, %r15
jne .LBB1_10
.LBB1_11: # %._crit_edge61
movl $10, %edi
callq putchar@PLT
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
xorl %eax, %eax
addq $96, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z5helloPiS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z5helloPiS_,@object # @_Z5helloPiS_
.section .rodata,"a",@progbits
.globl _Z5helloPiS_
.p2align 3, 0x0
_Z5helloPiS_:
.quad _Z20__device_stub__helloPiS_
.size _Z5helloPiS_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "input "
.size .L.str, 7
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "%d "
.size .L.str.1, 4
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "blocks = %d cores = %d\n"
.size .L.str.3, 24
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "ouput "
.size .L.str.4, 7
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z5helloPiS_"
.size .L__unnamed_1, 13
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z20__device_stub__helloPiS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z5helloPiS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,int var_3,float var_4,int var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22) {
for (int i=0; i < var_1; ++i) {
for (int i=0; i < var_2; ++i) {
for (int i=0; i < var_3; ++i) {
if (comp == asinf(+1.6052E-29f * (var_4 - (+1.3418E-44f * -1.1849E-36f)))) {
float tmp_1 = +1.4940E-23f;
float tmp_2 = -1.6240E35f + -1.2672E-41f + +1.0623E34f + atan2f(logf(var_6 * -0.0f), (-1.4919E-42f * atan2f((-1.9619E-37f * (-1.6127E-37f + acosf(var_7 * -1.0424E-41f / -1.8312E-36f * var_8))), var_9 * (-1.5333E-43f * var_10 / powf(var_11 - coshf((var_12 - coshf(var_13 / -1.9055E20f))), -1.0315E9f)))));
comp = tmp_2 * tmp_1 - +1.6798E36f * (+1.1643E34f - (var_14 * +1.4241E36f * (var_15 + var_16)));
comp += var_17 / floorf(+0.0f);
for (int i=0; i < var_5; ++i) {
comp += +1.6689E-43f - asinf(-1.4086E35f / +0.0f * var_18 + var_19);
}
if (comp == (+1.3292E34f - var_20 * (-1.9025E-42f - var_21))) {
comp = +1.9010E19f * var_22 * -1.4243E-5f * tanhf((-1.4812E-35f * +1.5328E36f));
}
}
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
int tmp_4 = atoi(argv[4]);
float tmp_5 = atof(argv[5]);
int tmp_6 = atoi(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23);
cudaDeviceSynchronize();
return 0;
} | .file "tmpxft_000990a0_00000000-6_test.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z11initPointerf
.type _Z11initPointerf, @function
_Z11initPointerf:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movd %xmm0, %ebx
movl $40, %edi
call malloc@PLT
movq %rax, %rdx
leaq 40(%rax), %rcx
.L4:
movl %ebx, (%rdx)
addq $4, %rdx
cmpq %rcx, %rdx
jne .L4
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z11initPointerf, .-_Z11initPointerf
.globl _Z47__device_stub__Z7computefiiififfffffffffffffffffiiififffffffffffffffff
.type _Z47__device_stub__Z7computefiiififfffffffffffffffffiiififffffffffffffffff, @function
_Z47__device_stub__Z7computefiiififfffffffffffffffffiiififffffffffffffffff:
.LFB2083:
.cfi_startproc
endbr64
subq $312, %rsp
.cfi_def_cfa_offset 320
movss %xmm0, 44(%rsp)
movl %edi, 40(%rsp)
movl %esi, 36(%rsp)
movl %edx, 32(%rsp)
movss %xmm1, 28(%rsp)
movl %ecx, 24(%rsp)
movss %xmm2, 20(%rsp)
movss %xmm3, 16(%rsp)
movss %xmm4, 12(%rsp)
movss %xmm5, 8(%rsp)
movss %xmm6, 4(%rsp)
movss %xmm7, (%rsp)
movq %fs:40, %rax
movq %rax, 296(%rsp)
xorl %eax, %eax
leaq 44(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rax
movq %rax, 120(%rsp)
leaq 36(%rsp), %rax
movq %rax, 128(%rsp)
leaq 32(%rsp), %rax
movq %rax, 136(%rsp)
leaq 28(%rsp), %rax
movq %rax, 144(%rsp)
leaq 24(%rsp), %rax
movq %rax, 152(%rsp)
leaq 20(%rsp), %rax
movq %rax, 160(%rsp)
leaq 16(%rsp), %rax
movq %rax, 168(%rsp)
leaq 12(%rsp), %rax
movq %rax, 176(%rsp)
leaq 8(%rsp), %rax
movq %rax, 184(%rsp)
leaq 4(%rsp), %rax
movq %rax, 192(%rsp)
movq %rsp, %rax
movq %rax, 200(%rsp)
leaq 320(%rsp), %rax
movq %rax, 208(%rsp)
leaq 328(%rsp), %rax
movq %rax, 216(%rsp)
leaq 336(%rsp), %rax
movq %rax, 224(%rsp)
leaq 344(%rsp), %rax
movq %rax, 232(%rsp)
leaq 352(%rsp), %rax
movq %rax, 240(%rsp)
leaq 360(%rsp), %rax
movq %rax, 248(%rsp)
leaq 368(%rsp), %rax
movq %rax, 256(%rsp)
leaq 376(%rsp), %rax
movq %rax, 264(%rsp)
leaq 384(%rsp), %rax
movq %rax, 272(%rsp)
leaq 392(%rsp), %rax
movq %rax, 280(%rsp)
leaq 400(%rsp), %rax
movq %rax, 288(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 296(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $312, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 328
pushq 56(%rsp)
.cfi_def_cfa_offset 336
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z7computefiiififffffffffffffffff(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 320
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z47__device_stub__Z7computefiiififfffffffffffffffffiiififffffffffffffffff, .-_Z47__device_stub__Z7computefiiififfffffffffffffffffiiififffffffffffffffff
.globl _Z7computefiiififffffffffffffffff
.type _Z7computefiiififffffffffffffffff, @function
_Z7computefiiififffffffffffffffff:
.LFB2084:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movss 192(%rsp), %xmm8
movss %xmm8, 80(%rsp)
movss 184(%rsp), %xmm8
movss %xmm8, 72(%rsp)
movss 176(%rsp), %xmm8
movss %xmm8, 64(%rsp)
movss 168(%rsp), %xmm8
movss %xmm8, 56(%rsp)
movss 160(%rsp), %xmm8
movss %xmm8, 48(%rsp)
movss 152(%rsp), %xmm8
movss %xmm8, 40(%rsp)
movss 144(%rsp), %xmm8
movss %xmm8, 32(%rsp)
movss 136(%rsp), %xmm8
movss %xmm8, 24(%rsp)
movss 128(%rsp), %xmm8
movss %xmm8, 16(%rsp)
movss 120(%rsp), %xmm8
movss %xmm8, 8(%rsp)
movss 112(%rsp), %xmm8
movss %xmm8, (%rsp)
call _Z47__device_stub__Z7computefiiififfffffffffffffffffiiififffffffffffffffff
addq $104, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z7computefiiififffffffffffffffff, .-_Z7computefiiififffffffffffffffff
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $192, %rsp
.cfi_def_cfa_offset 240
movq %rsi, %rbx
movq 8(%rsi), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 152(%rsp)
movq 16(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %rbp
movq 24(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r12
movq 32(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r13
movq 40(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 144(%rsp)
movq 48(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r14
movq 56(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 136(%rsp)
movq 64(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 128(%rsp)
movq 72(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 120(%rsp)
movq 80(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 112(%rsp)
movq 88(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 104(%rsp)
movq 96(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 96(%rsp)
movq 104(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 88(%rsp)
movq 112(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 80(%rsp)
movq 120(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 72(%rsp)
movq 128(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 64(%rsp)
movq 136(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 56(%rsp)
movq 144(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 48(%rsp)
movq 152(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 40(%rsp)
movq 160(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 32(%rsp)
movq 168(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 24(%rsp)
movq 176(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 16(%rsp)
movq 184(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 8(%rsp)
movl $1, 180(%rsp)
movl $1, 184(%rsp)
movl $1, 168(%rsp)
movl $1, 172(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 180(%rsp), %rdx
movl $1, %ecx
movq 168(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L18
.L16:
call cudaDeviceSynchronize@PLT
movl $0, %eax
addq $192, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L18:
.cfi_restore_state
pxor %xmm0, %xmm0
cvtsd2ss 152(%rsp), %xmm0
subq $96, %rsp
.cfi_def_cfa_offset 336
pxor %xmm1, %xmm1
cvtsd2ss 104(%rsp), %xmm1
movss %xmm1, 80(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 112(%rsp), %xmm1
movss %xmm1, 72(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 120(%rsp), %xmm1
movss %xmm1, 64(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 128(%rsp), %xmm1
movss %xmm1, 56(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 136(%rsp), %xmm1
movss %xmm1, 48(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 144(%rsp), %xmm1
movss %xmm1, 40(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 152(%rsp), %xmm1
movss %xmm1, 32(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 160(%rsp), %xmm1
movss %xmm1, 24(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 168(%rsp), %xmm1
movss %xmm1, 16(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 176(%rsp), %xmm1
movss %xmm1, 8(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 184(%rsp), %xmm1
movss %xmm1, (%rsp)
pxor %xmm7, %xmm7
cvtsd2ss 192(%rsp), %xmm7
pxor %xmm6, %xmm6
cvtsd2ss 200(%rsp), %xmm6
pxor %xmm5, %xmm5
cvtsd2ss 208(%rsp), %xmm5
pxor %xmm4, %xmm4
cvtsd2ss 216(%rsp), %xmm4
pxor %xmm3, %xmm3
cvtsd2ss 224(%rsp), %xmm3
pxor %xmm2, %xmm2
cvtsd2ss 232(%rsp), %xmm2
movl %r14d, %ecx
pxor %xmm1, %xmm1
cvtsd2ss 240(%rsp), %xmm1
movl %r13d, %edx
movl %r12d, %esi
movl %ebp, %edi
call _Z47__device_stub__Z7computefiiififfffffffffffffffffiiififffffffffffffffff
addq $96, %rsp
.cfi_def_cfa_offset 240
jmp .L16
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z7computefiiififffffffffffffffff"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z7computefiiififffffffffffffffff(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,int var_3,float var_4,int var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22) {
for (int i=0; i < var_1; ++i) {
for (int i=0; i < var_2; ++i) {
for (int i=0; i < var_3; ++i) {
if (comp == asinf(+1.6052E-29f * (var_4 - (+1.3418E-44f * -1.1849E-36f)))) {
float tmp_1 = +1.4940E-23f;
float tmp_2 = -1.6240E35f + -1.2672E-41f + +1.0623E34f + atan2f(logf(var_6 * -0.0f), (-1.4919E-42f * atan2f((-1.9619E-37f * (-1.6127E-37f + acosf(var_7 * -1.0424E-41f / -1.8312E-36f * var_8))), var_9 * (-1.5333E-43f * var_10 / powf(var_11 - coshf((var_12 - coshf(var_13 / -1.9055E20f))), -1.0315E9f)))));
comp = tmp_2 * tmp_1 - +1.6798E36f * (+1.1643E34f - (var_14 * +1.4241E36f * (var_15 + var_16)));
comp += var_17 / floorf(+0.0f);
for (int i=0; i < var_5; ++i) {
comp += +1.6689E-43f - asinf(-1.4086E35f / +0.0f * var_18 + var_19);
}
if (comp == (+1.3292E34f - var_20 * (-1.9025E-42f - var_21))) {
comp = +1.9010E19f * var_22 * -1.4243E-5f * tanhf((-1.4812E-35f * +1.5328E36f));
}
}
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
int tmp_4 = atoi(argv[4]);
float tmp_5 = atof(argv[5]);
int tmp_6 = atoi(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23);
cudaDeviceSynchronize();
return 0;
} | /* This is a automatically generated test. Do not modify */
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,int var_3,float var_4,int var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22) {
for (int i=0; i < var_1; ++i) {
for (int i=0; i < var_2; ++i) {
for (int i=0; i < var_3; ++i) {
if (comp == asinf(+1.6052E-29f * (var_4 - (+1.3418E-44f * -1.1849E-36f)))) {
float tmp_1 = +1.4940E-23f;
float tmp_2 = -1.6240E35f + -1.2672E-41f + +1.0623E34f + atan2f(logf(var_6 * -0.0f), (-1.4919E-42f * atan2f((-1.9619E-37f * (-1.6127E-37f + acosf(var_7 * -1.0424E-41f / -1.8312E-36f * var_8))), var_9 * (-1.5333E-43f * var_10 / powf(var_11 - coshf((var_12 - coshf(var_13 / -1.9055E20f))), -1.0315E9f)))));
comp = tmp_2 * tmp_1 - +1.6798E36f * (+1.1643E34f - (var_14 * +1.4241E36f * (var_15 + var_16)));
comp += var_17 / floorf(+0.0f);
for (int i=0; i < var_5; ++i) {
comp += +1.6689E-43f - asinf(-1.4086E35f / +0.0f * var_18 + var_19);
}
if (comp == (+1.3292E34f - var_20 * (-1.9025E-42f - var_21))) {
comp = +1.9010E19f * var_22 * -1.4243E-5f * tanhf((-1.4812E-35f * +1.5328E36f));
}
}
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
int tmp_4 = atoi(argv[4]);
float tmp_5 = atof(argv[5]);
int tmp_6 = atoi(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23);
hipDeviceSynchronize();
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /* This is a automatically generated test. Do not modify */
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,int var_3,float var_4,int var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22) {
for (int i=0; i < var_1; ++i) {
for (int i=0; i < var_2; ++i) {
for (int i=0; i < var_3; ++i) {
if (comp == asinf(+1.6052E-29f * (var_4 - (+1.3418E-44f * -1.1849E-36f)))) {
float tmp_1 = +1.4940E-23f;
float tmp_2 = -1.6240E35f + -1.2672E-41f + +1.0623E34f + atan2f(logf(var_6 * -0.0f), (-1.4919E-42f * atan2f((-1.9619E-37f * (-1.6127E-37f + acosf(var_7 * -1.0424E-41f / -1.8312E-36f * var_8))), var_9 * (-1.5333E-43f * var_10 / powf(var_11 - coshf((var_12 - coshf(var_13 / -1.9055E20f))), -1.0315E9f)))));
comp = tmp_2 * tmp_1 - +1.6798E36f * (+1.1643E34f - (var_14 * +1.4241E36f * (var_15 + var_16)));
comp += var_17 / floorf(+0.0f);
for (int i=0; i < var_5; ++i) {
comp += +1.6689E-43f - asinf(-1.4086E35f / +0.0f * var_18 + var_19);
}
if (comp == (+1.3292E34f - var_20 * (-1.9025E-42f - var_21))) {
comp = +1.9010E19f * var_22 * -1.4243E-5f * tanhf((-1.4812E-35f * +1.5328E36f));
}
}
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
int tmp_4 = atoi(argv[4]);
float tmp_5 = atof(argv[5]);
int tmp_6 = atoi(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23);
hipDeviceSynchronize();
return 0;
} | .text
.file "test.hip"
.globl _Z22__device_stub__computefiiififffffffffffffffff # -- Begin function _Z22__device_stub__computefiiififffffffffffffffff
.p2align 4, 0x90
.type _Z22__device_stub__computefiiififffffffffffffffff,@function
_Z22__device_stub__computefiiififffffffffffffffff: # @_Z22__device_stub__computefiiififffffffffffffffff
.cfi_startproc
# %bb.0:
subq $280, %rsp # imm = 0x118
.cfi_def_cfa_offset 288
movss %xmm0, 44(%rsp)
movl %edi, 40(%rsp)
movl %esi, 36(%rsp)
movl %edx, 32(%rsp)
movss %xmm1, 28(%rsp)
movl %ecx, 24(%rsp)
movss %xmm2, 20(%rsp)
movss %xmm3, 16(%rsp)
movss %xmm4, 12(%rsp)
movss %xmm5, 8(%rsp)
movss %xmm6, 4(%rsp)
movss %xmm7, (%rsp)
leaq 44(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rax
movq %rax, 104(%rsp)
leaq 36(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 28(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 20(%rsp), %rax
movq %rax, 144(%rsp)
leaq 16(%rsp), %rax
movq %rax, 152(%rsp)
leaq 12(%rsp), %rax
movq %rax, 160(%rsp)
leaq 8(%rsp), %rax
movq %rax, 168(%rsp)
leaq 4(%rsp), %rax
movq %rax, 176(%rsp)
movq %rsp, %rax
movq %rax, 184(%rsp)
leaq 288(%rsp), %rax
movq %rax, 192(%rsp)
leaq 296(%rsp), %rax
movq %rax, 200(%rsp)
leaq 304(%rsp), %rax
movq %rax, 208(%rsp)
leaq 312(%rsp), %rax
movq %rax, 216(%rsp)
leaq 320(%rsp), %rax
movq %rax, 224(%rsp)
leaq 328(%rsp), %rax
movq %rax, 232(%rsp)
leaq 336(%rsp), %rax
movq %rax, 240(%rsp)
leaq 344(%rsp), %rax
movq %rax, 248(%rsp)
leaq 352(%rsp), %rax
movq %rax, 256(%rsp)
leaq 360(%rsp), %rax
movq %rax, 264(%rsp)
leaq 368(%rsp), %rax
movq %rax, 272(%rsp)
leaq 80(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 56(%rsp), %rdx
leaq 48(%rsp), %rcx
callq __hipPopCallConfiguration
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z7computefiiififffffffffffffffff, %edi
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $296, %rsp # imm = 0x128
.cfi_adjust_cfa_offset -296
retq
.Lfunc_end0:
.size _Z22__device_stub__computefiiififffffffffffffffff, .Lfunc_end0-_Z22__device_stub__computefiiififffffffffffffffff
.cfi_endproc
# -- End function
.globl _Z11initPointerf # -- Begin function _Z11initPointerf
.p2align 4, 0x90
.type _Z11initPointerf,@function
_Z11initPointerf: # @_Z11initPointerf
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
movss %xmm0, 4(%rsp) # 4-byte Spill
movl $40, %edi
callq malloc
movss 4(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movss %xmm0, (%rax,%rcx,4)
incq %rcx
cmpq $10, %rcx
jne .LBB1_1
# %bb.2:
popq %rcx
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z11initPointerf, .Lfunc_end1-_Z11initPointerf
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $240, %rsp
.cfi_def_cfa_offset 288
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rsi, %r13
movq 8(%rsi), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 232(%rsp) # 8-byte Spill
movq 16(%r13), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %rbx
movq 24(%r13), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r14
movq 32(%r13), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r15
movq 40(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 224(%rsp) # 8-byte Spill
movq 48(%r13), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r12
movq 56(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 216(%rsp) # 8-byte Spill
movq 64(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 208(%rsp) # 8-byte Spill
movq 72(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 200(%rsp) # 8-byte Spill
movq 80(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 104(%rsp) # 8-byte Spill
movq 88(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 96(%rsp) # 8-byte Spill
movq 96(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 88(%rsp) # 8-byte Spill
movq 104(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 192(%rsp) # 8-byte Spill
movq 112(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 184(%rsp) # 8-byte Spill
movq 120(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 176(%rsp) # 8-byte Spill
movq 128(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 168(%rsp) # 8-byte Spill
movq 136(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 160(%rsp) # 8-byte Spill
movq 144(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 152(%rsp) # 8-byte Spill
movq 152(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 144(%rsp) # 8-byte Spill
movq 160(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 136(%rsp) # 8-byte Spill
movq 168(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 128(%rsp) # 8-byte Spill
movq 176(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 120(%rsp) # 8-byte Spill
movq 184(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 112(%rsp) # 8-byte Spill
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_2
# %bb.1:
movsd 112(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm8
movsd 120(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm9
movsd 128(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm10
movsd 136(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm11
movsd 144(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm12
movsd 152(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm13
movsd 160(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm14
movsd 168(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm15
movsd 176(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm2
movsd 184(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm3
movsd 192(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm4
movsd 88(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 88(%rsp) # 4-byte Spill
movsd 96(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 96(%rsp) # 4-byte Spill
movsd 104(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 104(%rsp) # 4-byte Spill
movsd 200(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm7
movsd 208(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm6
movsd 216(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm5
movsd 224(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm1
movsd 232(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm8, 80(%rsp)
movss %xmm9, 72(%rsp)
movss %xmm10, 64(%rsp)
movss %xmm11, 56(%rsp)
movss %xmm12, 48(%rsp)
movss %xmm13, 40(%rsp)
movss %xmm14, 32(%rsp)
movss %xmm15, 24(%rsp)
movss %xmm2, 16(%rsp)
movss %xmm3, 8(%rsp)
movss %xmm4, (%rsp)
movl %ebx, %edi
movl %r14d, %esi
movl %r15d, %edx
movl %r12d, %ecx
movaps %xmm5, %xmm2
movaps %xmm6, %xmm3
movaps %xmm7, %xmm4
movss 104(%rsp), %xmm5 # 4-byte Reload
# xmm5 = mem[0],zero,zero,zero
movss 96(%rsp), %xmm6 # 4-byte Reload
# xmm6 = mem[0],zero,zero,zero
movss 88(%rsp), %xmm7 # 4-byte Reload
# xmm7 = mem[0],zero,zero,zero
callq _Z22__device_stub__computefiiififffffffffffffffff
.LBB2_2:
callq hipDeviceSynchronize
xorl %eax, %eax
addq $240, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7computefiiififffffffffffffffff, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z7computefiiififffffffffffffffff,@object # @_Z7computefiiififffffffffffffffff
.section .rodata,"a",@progbits
.globl _Z7computefiiififffffffffffffffff
.p2align 3, 0x0
_Z7computefiiififffffffffffffffff:
.quad _Z22__device_stub__computefiiififffffffffffffffff
.size _Z7computefiiififffffffffffffffff, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z7computefiiififffffffffffffffff"
.size .L__unnamed_1, 34
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__computefiiififffffffffffffffff
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z7computefiiififffffffffffffffff
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000990a0_00000000-6_test.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z11initPointerf
.type _Z11initPointerf, @function
_Z11initPointerf:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movd %xmm0, %ebx
movl $40, %edi
call malloc@PLT
movq %rax, %rdx
leaq 40(%rax), %rcx
.L4:
movl %ebx, (%rdx)
addq $4, %rdx
cmpq %rcx, %rdx
jne .L4
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z11initPointerf, .-_Z11initPointerf
.globl _Z47__device_stub__Z7computefiiififfffffffffffffffffiiififffffffffffffffff
.type _Z47__device_stub__Z7computefiiififfffffffffffffffffiiififffffffffffffffff, @function
_Z47__device_stub__Z7computefiiififfffffffffffffffffiiififffffffffffffffff:
.LFB2083:
.cfi_startproc
endbr64
subq $312, %rsp
.cfi_def_cfa_offset 320
movss %xmm0, 44(%rsp)
movl %edi, 40(%rsp)
movl %esi, 36(%rsp)
movl %edx, 32(%rsp)
movss %xmm1, 28(%rsp)
movl %ecx, 24(%rsp)
movss %xmm2, 20(%rsp)
movss %xmm3, 16(%rsp)
movss %xmm4, 12(%rsp)
movss %xmm5, 8(%rsp)
movss %xmm6, 4(%rsp)
movss %xmm7, (%rsp)
movq %fs:40, %rax
movq %rax, 296(%rsp)
xorl %eax, %eax
leaq 44(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rax
movq %rax, 120(%rsp)
leaq 36(%rsp), %rax
movq %rax, 128(%rsp)
leaq 32(%rsp), %rax
movq %rax, 136(%rsp)
leaq 28(%rsp), %rax
movq %rax, 144(%rsp)
leaq 24(%rsp), %rax
movq %rax, 152(%rsp)
leaq 20(%rsp), %rax
movq %rax, 160(%rsp)
leaq 16(%rsp), %rax
movq %rax, 168(%rsp)
leaq 12(%rsp), %rax
movq %rax, 176(%rsp)
leaq 8(%rsp), %rax
movq %rax, 184(%rsp)
leaq 4(%rsp), %rax
movq %rax, 192(%rsp)
movq %rsp, %rax
movq %rax, 200(%rsp)
leaq 320(%rsp), %rax
movq %rax, 208(%rsp)
leaq 328(%rsp), %rax
movq %rax, 216(%rsp)
leaq 336(%rsp), %rax
movq %rax, 224(%rsp)
leaq 344(%rsp), %rax
movq %rax, 232(%rsp)
leaq 352(%rsp), %rax
movq %rax, 240(%rsp)
leaq 360(%rsp), %rax
movq %rax, 248(%rsp)
leaq 368(%rsp), %rax
movq %rax, 256(%rsp)
leaq 376(%rsp), %rax
movq %rax, 264(%rsp)
leaq 384(%rsp), %rax
movq %rax, 272(%rsp)
leaq 392(%rsp), %rax
movq %rax, 280(%rsp)
leaq 400(%rsp), %rax
movq %rax, 288(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 296(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $312, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 328
pushq 56(%rsp)
.cfi_def_cfa_offset 336
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z7computefiiififffffffffffffffff(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 320
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z47__device_stub__Z7computefiiififfffffffffffffffffiiififffffffffffffffff, .-_Z47__device_stub__Z7computefiiififfffffffffffffffffiiififffffffffffffffff
.globl _Z7computefiiififffffffffffffffff
.type _Z7computefiiififffffffffffffffff, @function
_Z7computefiiififffffffffffffffff:
.LFB2084:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movss 192(%rsp), %xmm8
movss %xmm8, 80(%rsp)
movss 184(%rsp), %xmm8
movss %xmm8, 72(%rsp)
movss 176(%rsp), %xmm8
movss %xmm8, 64(%rsp)
movss 168(%rsp), %xmm8
movss %xmm8, 56(%rsp)
movss 160(%rsp), %xmm8
movss %xmm8, 48(%rsp)
movss 152(%rsp), %xmm8
movss %xmm8, 40(%rsp)
movss 144(%rsp), %xmm8
movss %xmm8, 32(%rsp)
movss 136(%rsp), %xmm8
movss %xmm8, 24(%rsp)
movss 128(%rsp), %xmm8
movss %xmm8, 16(%rsp)
movss 120(%rsp), %xmm8
movss %xmm8, 8(%rsp)
movss 112(%rsp), %xmm8
movss %xmm8, (%rsp)
call _Z47__device_stub__Z7computefiiififfffffffffffffffffiiififffffffffffffffff
addq $104, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z7computefiiififffffffffffffffff, .-_Z7computefiiififffffffffffffffff
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $192, %rsp
.cfi_def_cfa_offset 240
movq %rsi, %rbx
movq 8(%rsi), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 152(%rsp)
movq 16(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %rbp
movq 24(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r12
movq 32(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r13
movq 40(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 144(%rsp)
movq 48(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r14
movq 56(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 136(%rsp)
movq 64(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 128(%rsp)
movq 72(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 120(%rsp)
movq 80(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 112(%rsp)
movq 88(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 104(%rsp)
movq 96(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 96(%rsp)
movq 104(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 88(%rsp)
movq 112(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 80(%rsp)
movq 120(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 72(%rsp)
movq 128(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 64(%rsp)
movq 136(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 56(%rsp)
movq 144(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 48(%rsp)
movq 152(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 40(%rsp)
movq 160(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 32(%rsp)
movq 168(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 24(%rsp)
movq 176(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 16(%rsp)
movq 184(%rbx), %rdi
movl $0, %esi
call strtod@PLT
movsd %xmm0, 8(%rsp)
movl $1, 180(%rsp)
movl $1, 184(%rsp)
movl $1, 168(%rsp)
movl $1, 172(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 180(%rsp), %rdx
movl $1, %ecx
movq 168(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L18
.L16:
call cudaDeviceSynchronize@PLT
movl $0, %eax
addq $192, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L18:
.cfi_restore_state
pxor %xmm0, %xmm0
cvtsd2ss 152(%rsp), %xmm0
subq $96, %rsp
.cfi_def_cfa_offset 336
pxor %xmm1, %xmm1
cvtsd2ss 104(%rsp), %xmm1
movss %xmm1, 80(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 112(%rsp), %xmm1
movss %xmm1, 72(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 120(%rsp), %xmm1
movss %xmm1, 64(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 128(%rsp), %xmm1
movss %xmm1, 56(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 136(%rsp), %xmm1
movss %xmm1, 48(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 144(%rsp), %xmm1
movss %xmm1, 40(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 152(%rsp), %xmm1
movss %xmm1, 32(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 160(%rsp), %xmm1
movss %xmm1, 24(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 168(%rsp), %xmm1
movss %xmm1, 16(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 176(%rsp), %xmm1
movss %xmm1, 8(%rsp)
pxor %xmm1, %xmm1
cvtsd2ss 184(%rsp), %xmm1
movss %xmm1, (%rsp)
pxor %xmm7, %xmm7
cvtsd2ss 192(%rsp), %xmm7
pxor %xmm6, %xmm6
cvtsd2ss 200(%rsp), %xmm6
pxor %xmm5, %xmm5
cvtsd2ss 208(%rsp), %xmm5
pxor %xmm4, %xmm4
cvtsd2ss 216(%rsp), %xmm4
pxor %xmm3, %xmm3
cvtsd2ss 224(%rsp), %xmm3
pxor %xmm2, %xmm2
cvtsd2ss 232(%rsp), %xmm2
movl %r14d, %ecx
pxor %xmm1, %xmm1
cvtsd2ss 240(%rsp), %xmm1
movl %r13d, %edx
movl %r12d, %esi
movl %ebp, %edi
call _Z47__device_stub__Z7computefiiififfffffffffffffffffiiififffffffffffffffff
addq $96, %rsp
.cfi_def_cfa_offset 240
jmp .L16
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z7computefiiififffffffffffffffff"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z7computefiiififffffffffffffffff(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "test.hip"
.globl _Z22__device_stub__computefiiififffffffffffffffff # -- Begin function _Z22__device_stub__computefiiififffffffffffffffff
.p2align 4, 0x90
.type _Z22__device_stub__computefiiififffffffffffffffff,@function
_Z22__device_stub__computefiiififffffffffffffffff: # @_Z22__device_stub__computefiiififffffffffffffffff
.cfi_startproc
# %bb.0:
subq $280, %rsp # imm = 0x118
.cfi_def_cfa_offset 288
movss %xmm0, 44(%rsp)
movl %edi, 40(%rsp)
movl %esi, 36(%rsp)
movl %edx, 32(%rsp)
movss %xmm1, 28(%rsp)
movl %ecx, 24(%rsp)
movss %xmm2, 20(%rsp)
movss %xmm3, 16(%rsp)
movss %xmm4, 12(%rsp)
movss %xmm5, 8(%rsp)
movss %xmm6, 4(%rsp)
movss %xmm7, (%rsp)
leaq 44(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rax
movq %rax, 104(%rsp)
leaq 36(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 28(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 20(%rsp), %rax
movq %rax, 144(%rsp)
leaq 16(%rsp), %rax
movq %rax, 152(%rsp)
leaq 12(%rsp), %rax
movq %rax, 160(%rsp)
leaq 8(%rsp), %rax
movq %rax, 168(%rsp)
leaq 4(%rsp), %rax
movq %rax, 176(%rsp)
movq %rsp, %rax
movq %rax, 184(%rsp)
leaq 288(%rsp), %rax
movq %rax, 192(%rsp)
leaq 296(%rsp), %rax
movq %rax, 200(%rsp)
leaq 304(%rsp), %rax
movq %rax, 208(%rsp)
leaq 312(%rsp), %rax
movq %rax, 216(%rsp)
leaq 320(%rsp), %rax
movq %rax, 224(%rsp)
leaq 328(%rsp), %rax
movq %rax, 232(%rsp)
leaq 336(%rsp), %rax
movq %rax, 240(%rsp)
leaq 344(%rsp), %rax
movq %rax, 248(%rsp)
leaq 352(%rsp), %rax
movq %rax, 256(%rsp)
leaq 360(%rsp), %rax
movq %rax, 264(%rsp)
leaq 368(%rsp), %rax
movq %rax, 272(%rsp)
leaq 80(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 56(%rsp), %rdx
leaq 48(%rsp), %rcx
callq __hipPopCallConfiguration
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z7computefiiififffffffffffffffff, %edi
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $296, %rsp # imm = 0x128
.cfi_adjust_cfa_offset -296
retq
.Lfunc_end0:
.size _Z22__device_stub__computefiiififffffffffffffffff, .Lfunc_end0-_Z22__device_stub__computefiiififffffffffffffffff
.cfi_endproc
# -- End function
.globl _Z11initPointerf # -- Begin function _Z11initPointerf
.p2align 4, 0x90
.type _Z11initPointerf,@function
_Z11initPointerf: # @_Z11initPointerf
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
movss %xmm0, 4(%rsp) # 4-byte Spill
movl $40, %edi
callq malloc
movss 4(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movss %xmm0, (%rax,%rcx,4)
incq %rcx
cmpq $10, %rcx
jne .LBB1_1
# %bb.2:
popq %rcx
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z11initPointerf, .Lfunc_end1-_Z11initPointerf
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $240, %rsp
.cfi_def_cfa_offset 288
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rsi, %r13
movq 8(%rsi), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 232(%rsp) # 8-byte Spill
movq 16(%r13), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %rbx
movq 24(%r13), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r14
movq 32(%r13), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r15
movq 40(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 224(%rsp) # 8-byte Spill
movq 48(%r13), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r12
movq 56(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 216(%rsp) # 8-byte Spill
movq 64(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 208(%rsp) # 8-byte Spill
movq 72(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 200(%rsp) # 8-byte Spill
movq 80(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 104(%rsp) # 8-byte Spill
movq 88(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 96(%rsp) # 8-byte Spill
movq 96(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 88(%rsp) # 8-byte Spill
movq 104(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 192(%rsp) # 8-byte Spill
movq 112(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 184(%rsp) # 8-byte Spill
movq 120(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 176(%rsp) # 8-byte Spill
movq 128(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 168(%rsp) # 8-byte Spill
movq 136(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 160(%rsp) # 8-byte Spill
movq 144(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 152(%rsp) # 8-byte Spill
movq 152(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 144(%rsp) # 8-byte Spill
movq 160(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 136(%rsp) # 8-byte Spill
movq 168(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 128(%rsp) # 8-byte Spill
movq 176(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 120(%rsp) # 8-byte Spill
movq 184(%r13), %rdi
xorl %esi, %esi
callq strtod
movsd %xmm0, 112(%rsp) # 8-byte Spill
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_2
# %bb.1:
movsd 112(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm8
movsd 120(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm9
movsd 128(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm10
movsd 136(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm11
movsd 144(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm12
movsd 152(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm13
movsd 160(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm14
movsd 168(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm15
movsd 176(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm2
movsd 184(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm3
movsd 192(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm4
movsd 88(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 88(%rsp) # 4-byte Spill
movsd 96(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 96(%rsp) # 4-byte Spill
movsd 104(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 104(%rsp) # 4-byte Spill
movsd 200(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm7
movsd 208(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm6
movsd 216(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm5
movsd 224(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm1
movsd 232(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
cvtsd2ss %xmm0, %xmm0
movss %xmm8, 80(%rsp)
movss %xmm9, 72(%rsp)
movss %xmm10, 64(%rsp)
movss %xmm11, 56(%rsp)
movss %xmm12, 48(%rsp)
movss %xmm13, 40(%rsp)
movss %xmm14, 32(%rsp)
movss %xmm15, 24(%rsp)
movss %xmm2, 16(%rsp)
movss %xmm3, 8(%rsp)
movss %xmm4, (%rsp)
movl %ebx, %edi
movl %r14d, %esi
movl %r15d, %edx
movl %r12d, %ecx
movaps %xmm5, %xmm2
movaps %xmm6, %xmm3
movaps %xmm7, %xmm4
movss 104(%rsp), %xmm5 # 4-byte Reload
# xmm5 = mem[0],zero,zero,zero
movss 96(%rsp), %xmm6 # 4-byte Reload
# xmm6 = mem[0],zero,zero,zero
movss 88(%rsp), %xmm7 # 4-byte Reload
# xmm7 = mem[0],zero,zero,zero
callq _Z22__device_stub__computefiiififffffffffffffffff
.LBB2_2:
callq hipDeviceSynchronize
xorl %eax, %eax
addq $240, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7computefiiififffffffffffffffff, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z7computefiiififffffffffffffffff,@object # @_Z7computefiiififffffffffffffffff
.section .rodata,"a",@progbits
.globl _Z7computefiiififffffffffffffffff
.p2align 3, 0x0
_Z7computefiiififffffffffffffffff:
.quad _Z22__device_stub__computefiiififffffffffffffffff
.size _Z7computefiiififffffffffffffffff, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z7computefiiififffffffffffffffff"
.size .L__unnamed_1, 34
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__computefiiififffffffffffffffff
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z7computefiiififffffffffffffffff
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | //headers
#include <stdio.h>
#include <cuda.h>
#define imin(a, b) ((a < b) ? a : b)
#define sum_squares(x) (x * (x + 1) * (2 * x + 1) / 6)
//global variables
float *hostA = NULL;
float *hostB = NULL;
float *partial_hostC = NULL;
float *deviceA = NULL;
float *deviceB = NULL;
float *partial_deviceC = NULL;
const int iNumberOfArrayElements = 33 * 1024;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin(32, (iNumberOfArrayElements + threadsPerBlock - 1) / threadsPerBlock);
// *** CUDA KERNEL DEFINITION ***
__global__ void vecDotProduct(float *input1, float *input2, float *output)
{
//variable declaration
//shared across all threads within block
__shared__ float cache[threadsPerBlock];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int cacheIndex = threadIdx.x;
float temp = 0;
//code
while(tid < iNumberOfArrayElements)
{
temp += input1[tid] * input2[tid];
tid += blockDim.x * gridDim.x;
}
//set the cache values
cache[cacheIndex] = temp;
//synchronize threads in the block
__syncthreads();
//summation reduction
int i = blockDim.x / 2;
while(i != 0)
{
if(cacheIndex < i)
cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads();
i /= 2;
}
//copy to output memory
if(cacheIndex == 0)
output[blockIdx.x] = cache[0];
}
int main(void)
{
//function declaration
void cleanup(void);
//code
//allocate memory on host
hostA = (float *)malloc(iNumberOfArrayElements * sizeof(float));
if(hostA == NULL)
{
printf("CPU Memory Fatal Error - Can Not Allocate Enough Memory For Host Input Array 1.\nExiting Now ...\n");
exit(EXIT_FAILURE);
}
hostB = (float *)malloc(iNumberOfArrayElements * sizeof(float));
if(hostB == NULL)
{
printf("CPU Memory Fatal Error - Can Not Allocate Enough Memory For Host Input Array 2.\nExiting Now ...\n");
cleanup();
exit(EXIT_FAILURE);
}
partial_hostC = (float *)malloc(blocksPerGrid * sizeof(float));
if(partial_hostC == NULL)
{
printf("CPU Memory Fatal Error - Can Not Allocate Enough Memory For Host Output Array.\nExiting Now ...\n");
cleanup();
exit(EXIT_FAILURE);
}
//allocate memory on device
cudaError_t err = cudaSuccess;
err = cudaMalloc((void **)&deviceA, iNumberOfArrayElements * sizeof(float));
if(err != cudaSuccess)
{
printf("GPU Memory Fatal Error - %s In The File Name %s At Line No %d.\nExiting Now ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
cleanup();
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&deviceB, iNumberOfArrayElements * sizeof(float));
if(err != cudaSuccess)
{
printf("GPU Memory Fatal Error - %s In The File Name %s At Line No %d.\nExiting Now ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
cleanup();
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&partial_deviceC, blocksPerGrid * sizeof(float));
if(err != cudaSuccess)
{
printf("GPU Memory Fatal Error - %s In The File Name %s At Line No %d.\nExiting Now ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
cleanup();
exit(EXIT_FAILURE);
}
//fill the host input array
for(int i = 0; i < iNumberOfArrayElements; i++)
{
hostA[i] = i;
hostB[i] = i * 2;
}
//copy the host input array to device input
err = cudaMemcpy(deviceA, hostA, iNumberOfArrayElements * sizeof(float), cudaMemcpyHostToDevice);
if(err != cudaSuccess)
{
printf("GPU Memory Fatal Error - %s In The File Name %s At Line No %d.\nExiting Now ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
cleanup();
exit(EXIT_FAILURE);
}
err = cudaMemcpy(deviceB, hostB, iNumberOfArrayElements * sizeof(float), cudaMemcpyHostToDevice);
if(err != cudaSuccess)
{
printf("GPU Memory Fatal Error - %s In The File Name %s At Line No %d.\nExiting Now ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
cleanup();
exit(EXIT_FAILURE);
}
//launch the kernel
vecDotProduct<<<blocksPerGrid, threadsPerBlock>>>(deviceA, deviceB, partial_deviceC);
//copy the device output array back to host
err = cudaMemcpy(partial_hostC, partial_deviceC, blocksPerGrid * sizeof(float), cudaMemcpyDeviceToHost);
if(err != cudaSuccess)
{
printf("GPU Memory Fatal Error - %s In The File Name %s At Line No %d.\nExiting Now ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
cleanup();
exit(EXIT_FAILURE);
}
//calculate final result on host
float finalC = 0.0f;
for(int i = 0; i < blocksPerGrid; i++)
{
finalC += partial_hostC[i];
}
//check if the final value is correct
if(finalC == (2 * sum_squares((float)(iNumberOfArrayElements - 1))))
printf("Dot Product Calculated On Device Is Accurate.\n");
else
printf("Dot Product Calculated On Device Is Not Accurate.\n");
printf("Dot Product = %0.6.\n", finalC);
printf("Expected Product = %0.6f.\n", 2 * sum_squares((float)(iNumberOfArrayElements - 1)));
//total cleanup
cleanup();
return (0);
}
void cleanup(void)
{
//code
//free device memory
if(partial_deviceC)
{
cudaFree(partial_deviceC);
partial_deviceC = NULL;
}
if(deviceB)
{
cudaFree(deviceB);
deviceB = NULL;
}
if(deviceA)
{
cudaFree(deviceA);
deviceA = NULL;
}
//free host memory
if(partial_hostC)
{
free(partial_hostC);
partial_hostC = NULL;
}
if(hostB)
{
free(hostB);
hostB = NULL;
}
if(hostA)
{
free(hostA);
hostA = NULL;
}
} | code for sm_80
Function : _Z13vecDotProductPfS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R7, SR_CTAID.X ; /* 0x0000000000077919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*0030*/ BSSY B0, 0x150 ; /* 0x0000011000007945 */
/* 0x000fe20003800000 */
/*0040*/ HFMA2.MMA R6, -RZ, RZ, 0, 0 ; /* 0x00000000ff067435 */
/* 0x000fe200000001ff */
/*0050*/ S2R R8, SR_TID.X ; /* 0x0000000000087919 */
/* 0x000e240000002100 */
/*0060*/ IMAD R0, R7, c[0x0][0x0], R8 ; /* 0x0000000007007a24 */
/* 0x001fca00078e0208 */
/*0070*/ ISETP.GT.AND P0, PT, R0, 0x83ff, PT ; /* 0x000083ff0000780c */
/* 0x000fda0003f04270 */
/*0080*/ @P0 BRA 0x140 ; /* 0x000000b000000947 */
/* 0x000fea0003800000 */
/*0090*/ IMAD.MOV.U32 R6, RZ, RZ, RZ ; /* 0x000000ffff067224 */
/* 0x000fe400078e00ff */
/*00a0*/ MOV R5, 0x4 ; /* 0x0000000400057802 */
/* 0x000fca0000000f00 */
/*00b0*/ IMAD.WIDE R2, R0, R5, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fc800078e0205 */
/*00c0*/ IMAD.WIDE R4, R0, R5, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fe400078e0205 */
/*00d0*/ LDG.E R2, [R2.64] ; /* 0x0000000602027981 */
/* 0x000ea8000c1e1900 */
/*00e0*/ LDG.E R5, [R4.64] ; /* 0x0000000604057981 */
/* 0x000ea2000c1e1900 */
/*00f0*/ IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff097624 */
/* 0x000fc800078e00ff */
/*0100*/ IMAD R0, R9, c[0x0][0xc], R0 ; /* 0x0000030009007a24 */
/* 0x000fca00078e0200 */
/*0110*/ ISETP.GE.AND P0, PT, R0, 0x8400, PT ; /* 0x000084000000780c */
/* 0x000fe20003f06270 */
/*0120*/ FFMA R6, R5, R2, R6 ; /* 0x0000000205067223 */
/* 0x004fd80000000006 */
/*0130*/ @!P0 BRA 0xa0 ; /* 0xffffff6000008947 */
/* 0x000fea000383ffff */
/*0140*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0150*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe20000000800 */
/*0160*/ STS [R8.X4], R6 ; /* 0x0000000608007388 */
/* 0x0001e20000004800 */
/*0170*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fc60008011604 */
/*0180*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0190*/ ISETP.NE.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe40003f05270 */
/*01a0*/ ISETP.NE.AND P1, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fda000bf25270 */
/*01b0*/ @!P1 BRA 0x2a0 ; /* 0x000000e000009947 */
/* 0x000fea0003800000 */
/*01c0*/ SHF.L.U32 R0, R8, 0x2, RZ ; /* 0x0000000208007819 */
/* 0x001fe200000006ff */
/*01d0*/ IMAD.U32 R3, RZ, RZ, UR4 ; /* 0x00000004ff037e24 */
/* 0x000fca000f8e00ff */
/*01e0*/ ISETP.GE.AND P1, PT, R8, R3, PT ; /* 0x000000030800720c */
/* 0x000fda0003f26270 */
/*01f0*/ @!P1 LEA R2, R3.reuse, R0, 0x2 ; /* 0x0000000003029211 */
/* 0x040fe200078e10ff */
/*0200*/ @!P1 LDS R4, [R8.X4] ; /* 0x0000000008049984 */
/* 0x000fe80000004800 */
/*0210*/ @!P1 LDS R5, [R2] ; /* 0x0000000002059984 */
/* 0x000e240000000800 */
/*0220*/ @!P1 FADD R4, R4, R5 ; /* 0x0000000504049221 */
/* 0x001fe20000000000 */
/*0230*/ IADD3 R5, R3.reuse, 0x1, RZ ; /* 0x0000000103057810 */
/* 0x040fe40007ffe0ff */
/*0240*/ LEA.HI R3, R3, R3, RZ, 0x1 ; /* 0x0000000303037211 */
/* 0x000fc400078f08ff */
/*0250*/ @!P1 STS [R8.X4], R4 ; /* 0x0000000408009388 */
/* 0x0001e80000004800 */
/*0260*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0270*/ ISETP.GT.U32.AND P1, PT, R5, 0x2, PT ; /* 0x000000020500780c */
/* 0x000fe40003f24070 */
/*0280*/ SHF.R.S32.HI R3, RZ, 0x1, R3 ; /* 0x00000001ff037819 */
/* 0x000fd60000011403 */
/*0290*/ @P1 BRA 0x1e0 ; /* 0xffffff4000001947 */
/* 0x001fea000383ffff */
/*02a0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x001fea0003800000 */
/*02b0*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */
/* 0x000e220000000800 */
/*02c0*/ IMAD.MOV.U32 R2, RZ, RZ, 0x4 ; /* 0x00000004ff027424 */
/* 0x000fc800078e00ff */
/*02d0*/ IMAD.WIDE.U32 R2, R7, R2, c[0x0][0x170] ; /* 0x00005c0007027625 */
/* 0x000fca00078e0002 */
/*02e0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101906 */
/*02f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0300*/ BRA 0x300; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0310*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0320*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0330*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0340*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0350*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0380*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0390*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | //headers
#include <stdio.h>
#include <cuda.h>
#define imin(a, b) ((a < b) ? a : b)
#define sum_squares(x) (x * (x + 1) * (2 * x + 1) / 6)
//global variables
float *hostA = NULL;
float *hostB = NULL;
float *partial_hostC = NULL;
float *deviceA = NULL;
float *deviceB = NULL;
float *partial_deviceC = NULL;
const int iNumberOfArrayElements = 33 * 1024;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin(32, (iNumberOfArrayElements + threadsPerBlock - 1) / threadsPerBlock);
// *** CUDA KERNEL DEFINITION ***
__global__ void vecDotProduct(float *input1, float *input2, float *output)
{
//variable declaration
//shared across all threads within block
__shared__ float cache[threadsPerBlock];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int cacheIndex = threadIdx.x;
float temp = 0;
//code
while(tid < iNumberOfArrayElements)
{
temp += input1[tid] * input2[tid];
tid += blockDim.x * gridDim.x;
}
//set the cache values
cache[cacheIndex] = temp;
//synchronize threads in the block
__syncthreads();
//summation reduction
int i = blockDim.x / 2;
while(i != 0)
{
if(cacheIndex < i)
cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads();
i /= 2;
}
//copy to output memory
if(cacheIndex == 0)
output[blockIdx.x] = cache[0];
}
int main(void)
{
//function declaration
void cleanup(void);
//code
//allocate memory on host
hostA = (float *)malloc(iNumberOfArrayElements * sizeof(float));
if(hostA == NULL)
{
printf("CPU Memory Fatal Error - Can Not Allocate Enough Memory For Host Input Array 1.\nExiting Now ...\n");
exit(EXIT_FAILURE);
}
hostB = (float *)malloc(iNumberOfArrayElements * sizeof(float));
if(hostB == NULL)
{
printf("CPU Memory Fatal Error - Can Not Allocate Enough Memory For Host Input Array 2.\nExiting Now ...\n");
cleanup();
exit(EXIT_FAILURE);
}
partial_hostC = (float *)malloc(blocksPerGrid * sizeof(float));
if(partial_hostC == NULL)
{
printf("CPU Memory Fatal Error - Can Not Allocate Enough Memory For Host Output Array.\nExiting Now ...\n");
cleanup();
exit(EXIT_FAILURE);
}
//allocate memory on device
cudaError_t err = cudaSuccess;
err = cudaMalloc((void **)&deviceA, iNumberOfArrayElements * sizeof(float));
if(err != cudaSuccess)
{
printf("GPU Memory Fatal Error - %s In The File Name %s At Line No %d.\nExiting Now ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
cleanup();
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&deviceB, iNumberOfArrayElements * sizeof(float));
if(err != cudaSuccess)
{
printf("GPU Memory Fatal Error - %s In The File Name %s At Line No %d.\nExiting Now ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
cleanup();
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&partial_deviceC, blocksPerGrid * sizeof(float));
if(err != cudaSuccess)
{
printf("GPU Memory Fatal Error - %s In The File Name %s At Line No %d.\nExiting Now ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
cleanup();
exit(EXIT_FAILURE);
}
//fill the host input array
for(int i = 0; i < iNumberOfArrayElements; i++)
{
hostA[i] = i;
hostB[i] = i * 2;
}
//copy the host input array to device input
err = cudaMemcpy(deviceA, hostA, iNumberOfArrayElements * sizeof(float), cudaMemcpyHostToDevice);
if(err != cudaSuccess)
{
printf("GPU Memory Fatal Error - %s In The File Name %s At Line No %d.\nExiting Now ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
cleanup();
exit(EXIT_FAILURE);
}
err = cudaMemcpy(deviceB, hostB, iNumberOfArrayElements * sizeof(float), cudaMemcpyHostToDevice);
if(err != cudaSuccess)
{
printf("GPU Memory Fatal Error - %s In The File Name %s At Line No %d.\nExiting Now ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
cleanup();
exit(EXIT_FAILURE);
}
//launch the kernel
vecDotProduct<<<blocksPerGrid, threadsPerBlock>>>(deviceA, deviceB, partial_deviceC);
//copy the device output array back to host
err = cudaMemcpy(partial_hostC, partial_deviceC, blocksPerGrid * sizeof(float), cudaMemcpyDeviceToHost);
if(err != cudaSuccess)
{
printf("GPU Memory Fatal Error - %s In The File Name %s At Line No %d.\nExiting Now ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
cleanup();
exit(EXIT_FAILURE);
}
//calculate final result on host
float finalC = 0.0f;
for(int i = 0; i < blocksPerGrid; i++)
{
finalC += partial_hostC[i];
}
//check if the final value is correct
if(finalC == (2 * sum_squares((float)(iNumberOfArrayElements - 1))))
printf("Dot Product Calculated On Device Is Accurate.\n");
else
printf("Dot Product Calculated On Device Is Not Accurate.\n");
printf("Dot Product = %0.6.\n", finalC);
printf("Expected Product = %0.6f.\n", 2 * sum_squares((float)(iNumberOfArrayElements - 1)));
//total cleanup
cleanup();
return (0);
}
void cleanup(void)
{
//code
//free device memory
if(partial_deviceC)
{
cudaFree(partial_deviceC);
partial_deviceC = NULL;
}
if(deviceB)
{
cudaFree(deviceB);
deviceB = NULL;
}
if(deviceA)
{
cudaFree(deviceA);
deviceA = NULL;
}
//free host memory
if(partial_hostC)
{
free(partial_hostC);
partial_hostC = NULL;
}
if(hostB)
{
free(hostB);
hostB = NULL;
}
if(hostA)
{
free(hostA);
hostA = NULL;
}
} | .file "tmpxft_000ce620_00000000-6_VectorDotProduct.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z7cleanupv
.type _Z7cleanupv, @function
_Z7cleanupv:
.LFB2058:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq partial_deviceC(%rip), %rdi
testq %rdi, %rdi
je .L4
call cudaFree@PLT
movq $0, partial_deviceC(%rip)
.L4:
movq deviceB(%rip), %rdi
testq %rdi, %rdi
je .L5
call cudaFree@PLT
movq $0, deviceB(%rip)
.L5:
movq deviceA(%rip), %rdi
testq %rdi, %rdi
je .L6
call cudaFree@PLT
movq $0, deviceA(%rip)
.L6:
movq partial_hostC(%rip), %rdi
testq %rdi, %rdi
je .L7
call free@PLT
movq $0, partial_hostC(%rip)
.L7:
movq hostB(%rip), %rdi
testq %rdi, %rdi
je .L8
call free@PLT
movq $0, hostB(%rip)
.L8:
movq hostA(%rip), %rdi
testq %rdi, %rdi
je .L3
call free@PLT
movq $0, hostA(%rip)
.L3:
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size _Z7cleanupv, .-_Z7cleanupv
.globl _Z37__device_stub__Z13vecDotProductPfS_S_PfS_S_
.type _Z37__device_stub__Z13vecDotProductPfS_S_PfS_S_, @function
_Z37__device_stub__Z13vecDotProductPfS_S_PfS_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z13vecDotProductPfS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z37__device_stub__Z13vecDotProductPfS_S_PfS_S_, .-_Z37__device_stub__Z13vecDotProductPfS_S_PfS_S_
.globl _Z13vecDotProductPfS_S_
.type _Z13vecDotProductPfS_S_, @function
_Z13vecDotProductPfS_S_:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z13vecDotProductPfS_S_PfS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z13vecDotProductPfS_S_, .-_Z13vecDotProductPfS_S_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "CPU Memory Fatal Error - Can Not Allocate Enough Memory For Host Input Array 1.\nExiting Now ...\n"
.align 8
.LC2:
.string "CPU Memory Fatal Error - Can Not Allocate Enough Memory For Host Input Array 2.\nExiting Now ...\n"
.align 8
.LC3:
.string "CPU Memory Fatal Error - Can Not Allocate Enough Memory For Host Output Array.\nExiting Now ...\n"
.align 8
.LC4:
.string "/home/ubuntu/Datasets/stackv2/train-structured/yashPat98/HPP/master/CUDA/CUDA-by-Example/12-VectorDotProduct/VectorDotProduct.cu"
.align 8
.LC5:
.string "GPU Memory Fatal Error - %s In The File Name %s At Line No %d.\nExiting Now ...\n"
.align 8
.LC7:
.string "Dot Product Calculated On Device Is Accurate.\n"
.align 8
.LC8:
.string "Dot Product Calculated On Device Is Not Accurate.\n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC9:
.string "Dot Product = %0.6.\n"
.LC11:
.string "Expected Product = %0.6f.\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $56, %rsp
.cfi_def_cfa_offset 64
movl $135168, %edi
call malloc@PLT
movq %rax, hostA(%rip)
testq %rax, %rax
je .L39
movl $135168, %edi
call malloc@PLT
movq %rax, hostB(%rip)
testq %rax, %rax
je .L40
movl $128, %edi
call malloc@PLT
movq %rax, partial_hostC(%rip)
testq %rax, %rax
je .L41
movl $135168, %esi
leaq deviceA(%rip), %rdi
call cudaMalloc@PLT
testl %eax, %eax
jne .L42
movl $135168, %esi
leaq deviceB(%rip), %rdi
call cudaMalloc@PLT
testl %eax, %eax
jne .L43
movl $128, %esi
leaq partial_deviceC(%rip), %rdi
call cudaMalloc@PLT
testl %eax, %eax
jne .L44
movl $0, %eax
.L25:
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movq hostA(%rip), %rdx
movss %xmm0, (%rdx,%rax,4)
leal (%rax,%rax), %edx
pxor %xmm0, %xmm0
cvtsi2ssl %edx, %xmm0
movq hostB(%rip), %rdx
movss %xmm0, (%rdx,%rax,4)
addq $1, %rax
cmpq $33792, %rax
jne .L25
movl $1, %ecx
movl $135168, %edx
movq hostA(%rip), %rsi
movq deviceA(%rip), %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L45
movl $1, %ecx
movl $135168, %edx
movq hostB(%rip), %rsi
movq deviceB(%rip), %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L46
movl $256, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $32, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L47
.L28:
movl $2, %ecx
movl $128, %edx
movq partial_deviceC(%rip), %rsi
movq partial_hostC(%rip), %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L48
movq partial_hostC(%rip), %rax
leaq 128(%rax), %rdx
movl $0x00000000, 12(%rsp)
.L30:
movss 12(%rsp), %xmm1
addss (%rax), %xmm1
movss %xmm1, 12(%rsp)
addq $4, %rax
cmpq %rdx, %rax
jne .L30
ucomiss .LC6(%rip), %xmm1
jp .L31
jne .L31
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L33:
pxor %xmm0, %xmm0
cvtss2sd 12(%rsp), %xmm0
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movsd .LC10(%rip), %xmm0
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
call _Z7cleanupv
movl $0, %eax
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L39:
.cfi_restore_state
leaq .LC1(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L40:
leaq .LC2(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
call _Z7cleanupv
movl $1, %edi
call exit@PLT
.L41:
leaq .LC3(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
call _Z7cleanupv
movl $1, %edi
call exit@PLT
.L42:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $96, %r8d
leaq .LC4(%rip), %rcx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call _Z7cleanupv
movl $1, %edi
call exit@PLT
.L43:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $104, %r8d
leaq .LC4(%rip), %rcx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call _Z7cleanupv
movl $1, %edi
call exit@PLT
.L44:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $112, %r8d
leaq .LC4(%rip), %rcx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call _Z7cleanupv
movl $1, %edi
call exit@PLT
.L45:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $128, %r8d
leaq .LC4(%rip), %rcx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call _Z7cleanupv
movl $1, %edi
call exit@PLT
.L46:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $136, %r8d
leaq .LC4(%rip), %rcx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call _Z7cleanupv
movl $1, %edi
call exit@PLT
.L47:
movq partial_deviceC(%rip), %rdx
movq deviceB(%rip), %rsi
movq deviceA(%rip), %rdi
call _Z37__device_stub__Z13vecDotProductPfS_S_PfS_S_
jmp .L28
.L48:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $148, %r8d
leaq .LC4(%rip), %rcx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call _Z7cleanupv
movl $1, %edi
call exit@PLT
.L31:
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L33
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC12:
.string "_Z13vecDotProductPfS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _Z13vecDotProductPfS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl partial_deviceC
.bss
.align 8
.type partial_deviceC, @object
.size partial_deviceC, 8
partial_deviceC:
.zero 8
.globl deviceB
.align 8
.type deviceB, @object
.size deviceB, 8
deviceB:
.zero 8
.globl deviceA
.align 8
.type deviceA, @object
.size deviceA, 8
deviceA:
.zero 8
.globl partial_hostC
.align 8
.type partial_hostC, @object
.size partial_hostC, 8
partial_hostC:
.zero 8
.globl hostB
.align 8
.type hostB, @object
.size hostB, 8
hostB:
.zero 8
.globl hostA
.align 8
.type hostA, @object
.size hostA, 8
hostA:
.zero 8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC6:
.long 1438329312
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC10:
.long 0
.long 1119315260
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.