system stringclasses 7 values | input stringlengths 10 94.5k | output stringlengths 14 55.5k |
|---|---|---|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
#include <cstdlib>
#include <cstdio>
#include <curand_kernel.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/device_ptr.h>
#include <sys/time.h>
using namespace std;
__global__ void partition_step (curandState * state, unsigned long seed )
{
int i= blockDim.x * blockIdx.x + threadIdx.x;
curand_init (seed, i, 0, &state[i]);
}
__global__ void randomColouring (curandState* globalState, int *degreeCount, int n, int limit){
int i= blockDim.x * blockIdx.x + threadIdx.x;
curandState localState = globalState[i];
float RANDOM = curand_uniform( &localState );
globalState[i] = localState;
RANDOM *= (limit - 1 + 0.999999);
RANDOM += 1;
degreeCount[i] = (int) RANDOM;
}
__global__ void conflictDetection (int *vertexArray, int *neighbourArray, int *degreeCount, int n, int m, int *detectConflict){
int i= blockDim.x * blockIdx.x + threadIdx.x;
if (i>=n){
return;
}
int myColour = degreeCount[i];
int incoming = -1, stop = -1;
incoming = vertexArray[i];
if (i==n-1){
stop = m;
}
else{
stop = vertexArray[i+1];
}
for (int j=incoming; j<stop; j++){
if (degreeCount[neighbourArray[j]-1] == myColour){
detectConflict[i]=1;
break;
}
}
}
__global__ void degreeCalc (int *vertexArray, int *neighbourArray, int *degreeCount, int n, int m){
int i= blockDim.x * blockIdx.x + threadIdx.x;
if (i>=n){
return;
}
int incoming = -1, stop = -1;
int diff=0;
incoming = vertexArray[i];
if (i==n-1){
stop = m;
}
else{
stop = vertexArray[i+1];
}
diff = stop-incoming;
atomicAdd(°reeCount[i], diff);
for (int j=incoming; j<stop; j++){
atomicAdd(°reeCount[neighbourArray[j]-1], 1);
}
}
int main(int argc, char const *argv[])
{
int n, m;
// Enter number of vertices and edges
cin>>n>>m;
int h_vertexArray[n];
int h_neighbourArray[m];
int h_degreeCount[n];
int h_detectConflict[n];
// Cuda memory allocation
size_t bytes = n*sizeof(int);
int *d_vertexArray = NULL;
cudaMalloc((void **)&d_vertexArray, bytes);
int *d_neighbourArray = NULL;
cudaMalloc((void **)&d_neighbourArray, m*sizeof(int));
int *d_detectConflict = NULL;
cudaMalloc((void **)&d_detectConflict, bytes);
cudaMemset((void *)d_detectConflict, 0,bytes);
int *d_degreeCount = NULL;
cudaMalloc((void **)&d_degreeCount, bytes);
cudaMemset((void *)d_degreeCount, 0, bytes);
curandState* partition_states;
cudaMalloc ( &partition_states, n*sizeof( curandState ) );
for (int i = 0; i < n; ++i)
{
/* code */
h_vertexArray[i]=m;
}
int temp = 0;
int current = 0;
int mark = 1;
// Add the graph based on input file
for (int i = 0; i < m; ++i)
{
/* code */
int incoming;
int end;
cin>>incoming>>end;
incoming++;
end++;
if (incoming!=mark){
if (incoming == mark+1 && h_vertexArray[mark-1]!=m){
}
else{
for (int j = mark; j<incoming; j++){
h_vertexArray[j-1]=temp;
}
}
mark = incoming;
}
if (incoming==current){
h_neighbourArray[temp]=end;
temp++;
}
else {
current = incoming;
h_vertexArray[current-1]=temp;
h_neighbourArray[temp]=end;
temp++;
}
}
cudaMemcpy(d_vertexArray, h_vertexArray, n*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_neighbourArray, h_neighbourArray, m*sizeof(int), cudaMemcpyHostToDevice);
int threadsPerBlock = 512;
int blocksPerGrid = (n + threadsPerBlock -1)/threadsPerBlock;
struct timeval startTime;
struct timeval endTime;
struct timezone startZone;
struct timezone endZone;
long startt,endt;
double overhead;
cout<<threadsPerBlock<<" "<<blocksPerGrid<<endl;
gettimeofday(&startTime,&startZone);
// Step 0 : Calculate degree of each vertex
degreeCalc<<<blocksPerGrid, threadsPerBlock>>>(d_vertexArray, d_neighbourArray, d_degreeCount, n, m);
thrust::device_ptr<int> d_ptr = thrust::device_pointer_cast(d_degreeCount);
int max = *(thrust::max_element(d_ptr, d_ptr + n));
cout<<"Max number of colours = "<<max<<endl;
partition_step <<<blocksPerGrid, threadsPerBlock>>> ( partition_states, time(NULL) );
// Step 1 - Randomly assign colours
randomColouring<<<blocksPerGrid, threadsPerBlock>>>(partition_states, d_degreeCount, n, max);
cudaMemcpy(h_degreeCount, d_degreeCount, n*sizeof(int), cudaMemcpyDeviceToHost);
cout<<"randomColouring"<<endl;
for (int i=0; i<n; i++){
cout<<"Color of"<<i<<": "<<h_degreeCount[i]<<endl;
}
cout<<endl;
conflictDetection<<<blocksPerGrid, threadsPerBlock>>>(d_vertexArray, d_neighbourArray, d_degreeCount, n, m, d_detectConflict);
thrust::device_ptr<int> d_detectConflict_ptr = thrust::device_pointer_cast(d_detectConflict);
int count1 = thrust::reduce(d_detectConflict_ptr, d_detectConflict_ptr + n);
cudaMemcpy(h_detectConflict, d_detectConflict, n*sizeof(int), cudaMemcpyDeviceToHost);
int countnew=0;
int old_colors[n];
for (int i = 0; i < n; ++i)
{
/* code */
old_colors[i] = -1;
}
for (int i=0; i<n-1; i++){
if (h_detectConflict[i]==0){
continue;
}
countnew++;
bool usedColours[n];
fill(usedColours, usedColours+n, false);
int incoming = -1, stop = -1;
incoming = h_vertexArray[i];
stop = h_vertexArray[i+1];
old_colors[i] = h_degreeCount[i];
for (int j=incoming; j<stop; j++){
usedColours[h_degreeCount[h_neighbourArray[j]-1]-1] = true;
}
for (int j=0; j<n; j++){
if (usedColours[j]==false){
h_degreeCount[i]=j+1;
break;
}
}
}
if (h_detectConflict[n-1]!=0){
bool usedColours[n];
countnew++;
fill(usedColours, usedColours+n, false);
int incoming = -1, stop = -1;
incoming = h_vertexArray[n-1];
stop = m;
for (int j=incoming; j<stop; j++){
usedColours[h_degreeCount[h_neighbourArray[j]-1]-1] = true;
}
for (int j=0; j<n; j++){
if (usedColours[j]==false){
h_degreeCount[n-1]=j+1;
break;
}
}
}
for (int i = 0; i < n; ++i)
{
cout<<"Colour of i from" <<i <<" "<<old_colors[i]<<":"<<h_degreeCount[i]<<endl;
}
cudaMemset((void *)d_detectConflict, 0, (n)*sizeof(int));
cudaMemcpy(d_degreeCount, h_degreeCount, n*sizeof(int), cudaMemcpyHostToDevice);
conflictDetection<<<blocksPerGrid, threadsPerBlock>>>(d_vertexArray, d_neighbourArray, d_degreeCount, n, m, d_detectConflict);
gettimeofday(&endTime,&endZone);
startt = startTime.tv_sec*1000000+startTime.tv_usec;
endt = endTime.tv_sec*1000000+endTime.tv_usec;
overhead = (endt-startt)/1000000.0;
count1 = thrust::reduce(d_detectConflict_ptr, d_detectConflict_ptr + n);
cout<<"Count: "<<count1<<" "<<countnew<<endl;
cout<<"time taken is"<<overhead<<endl;
cudaFree(d_neighbourArray);
cudaFree(d_vertexArray);
cudaFree(d_degreeCount);
cudaFree(d_detectConflict);
cudaDeviceReset();
return 0;
} | #include <hip/hip_runtime.h>
#include <iostream>
#include <cstdlib>
#include <cstdio>
#include <hiprand/hiprand_kernel.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/device_ptr.h>
#include <sys/time.h>
using namespace std;
__global__ void partition_step (hiprandState * state, unsigned long seed )
{
int i= blockDim.x * blockIdx.x + threadIdx.x;
hiprand_init (seed, i, 0, &state[i]);
}
__global__ void randomColouring (hiprandState* globalState, int *degreeCount, int n, int limit){
int i= blockDim.x * blockIdx.x + threadIdx.x;
hiprandState localState = globalState[i];
float RANDOM = hiprand_uniform( &localState );
globalState[i] = localState;
RANDOM *= (limit - 1 + 0.999999);
RANDOM += 1;
degreeCount[i] = (int) RANDOM;
}
__global__ void conflictDetection (int *vertexArray, int *neighbourArray, int *degreeCount, int n, int m, int *detectConflict){
int i= blockDim.x * blockIdx.x + threadIdx.x;
if (i>=n){
return;
}
int myColour = degreeCount[i];
int incoming = -1, stop = -1;
incoming = vertexArray[i];
if (i==n-1){
stop = m;
}
else{
stop = vertexArray[i+1];
}
for (int j=incoming; j<stop; j++){
if (degreeCount[neighbourArray[j]-1] == myColour){
detectConflict[i]=1;
break;
}
}
}
__global__ void degreeCalc (int *vertexArray, int *neighbourArray, int *degreeCount, int n, int m){
int i= blockDim.x * blockIdx.x + threadIdx.x;
if (i>=n){
return;
}
int incoming = -1, stop = -1;
int diff=0;
incoming = vertexArray[i];
if (i==n-1){
stop = m;
}
else{
stop = vertexArray[i+1];
}
diff = stop-incoming;
atomicAdd(°reeCount[i], diff);
for (int j=incoming; j<stop; j++){
atomicAdd(°reeCount[neighbourArray[j]-1], 1);
}
}
int main(int argc, char const *argv[])
{
int n, m;
// Enter number of vertices and edges
cin>>n>>m;
int h_vertexArray[n];
int h_neighbourArray[m];
int h_degreeCount[n];
int h_detectConflict[n];
// Cuda memory allocation
size_t bytes = n*sizeof(int);
int *d_vertexArray = NULL;
hipMalloc((void **)&d_vertexArray, bytes);
int *d_neighbourArray = NULL;
hipMalloc((void **)&d_neighbourArray, m*sizeof(int));
int *d_detectConflict = NULL;
hipMalloc((void **)&d_detectConflict, bytes);
hipMemset((void *)d_detectConflict, 0,bytes);
int *d_degreeCount = NULL;
hipMalloc((void **)&d_degreeCount, bytes);
hipMemset((void *)d_degreeCount, 0, bytes);
hiprandState* partition_states;
hipMalloc ( &partition_states, n*sizeof( hiprandState ) );
for (int i = 0; i < n; ++i)
{
/* code */
h_vertexArray[i]=m;
}
int temp = 0;
int current = 0;
int mark = 1;
// Add the graph based on input file
for (int i = 0; i < m; ++i)
{
/* code */
int incoming;
int end;
cin>>incoming>>end;
incoming++;
end++;
if (incoming!=mark){
if (incoming == mark+1 && h_vertexArray[mark-1]!=m){
}
else{
for (int j = mark; j<incoming; j++){
h_vertexArray[j-1]=temp;
}
}
mark = incoming;
}
if (incoming==current){
h_neighbourArray[temp]=end;
temp++;
}
else {
current = incoming;
h_vertexArray[current-1]=temp;
h_neighbourArray[temp]=end;
temp++;
}
}
hipMemcpy(d_vertexArray, h_vertexArray, n*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_neighbourArray, h_neighbourArray, m*sizeof(int), hipMemcpyHostToDevice);
int threadsPerBlock = 512;
int blocksPerGrid = (n + threadsPerBlock -1)/threadsPerBlock;
struct timeval startTime;
struct timeval endTime;
struct timezone startZone;
struct timezone endZone;
long startt,endt;
double overhead;
cout<<threadsPerBlock<<" "<<blocksPerGrid<<endl;
gettimeofday(&startTime,&startZone);
// Step 0 : Calculate degree of each vertex
degreeCalc<<<blocksPerGrid, threadsPerBlock>>>(d_vertexArray, d_neighbourArray, d_degreeCount, n, m);
thrust::device_ptr<int> d_ptr = thrust::device_pointer_cast(d_degreeCount);
int max = *(thrust::max_element(d_ptr, d_ptr + n));
cout<<"Max number of colours = "<<max<<endl;
partition_step <<<blocksPerGrid, threadsPerBlock>>> ( partition_states, time(NULL) );
// Step 1 - Randomly assign colours
randomColouring<<<blocksPerGrid, threadsPerBlock>>>(partition_states, d_degreeCount, n, max);
hipMemcpy(h_degreeCount, d_degreeCount, n*sizeof(int), hipMemcpyDeviceToHost);
cout<<"randomColouring"<<endl;
for (int i=0; i<n; i++){
cout<<"Color of"<<i<<": "<<h_degreeCount[i]<<endl;
}
cout<<endl;
conflictDetection<<<blocksPerGrid, threadsPerBlock>>>(d_vertexArray, d_neighbourArray, d_degreeCount, n, m, d_detectConflict);
thrust::device_ptr<int> d_detectConflict_ptr = thrust::device_pointer_cast(d_detectConflict);
int count1 = thrust::reduce(d_detectConflict_ptr, d_detectConflict_ptr + n);
hipMemcpy(h_detectConflict, d_detectConflict, n*sizeof(int), hipMemcpyDeviceToHost);
int countnew=0;
int old_colors[n];
for (int i = 0; i < n; ++i)
{
/* code */
old_colors[i] = -1;
}
for (int i=0; i<n-1; i++){
if (h_detectConflict[i]==0){
continue;
}
countnew++;
bool usedColours[n];
fill(usedColours, usedColours+n, false);
int incoming = -1, stop = -1;
incoming = h_vertexArray[i];
stop = h_vertexArray[i+1];
old_colors[i] = h_degreeCount[i];
for (int j=incoming; j<stop; j++){
usedColours[h_degreeCount[h_neighbourArray[j]-1]-1] = true;
}
for (int j=0; j<n; j++){
if (usedColours[j]==false){
h_degreeCount[i]=j+1;
break;
}
}
}
if (h_detectConflict[n-1]!=0){
bool usedColours[n];
countnew++;
fill(usedColours, usedColours+n, false);
int incoming = -1, stop = -1;
incoming = h_vertexArray[n-1];
stop = m;
for (int j=incoming; j<stop; j++){
usedColours[h_degreeCount[h_neighbourArray[j]-1]-1] = true;
}
for (int j=0; j<n; j++){
if (usedColours[j]==false){
h_degreeCount[n-1]=j+1;
break;
}
}
}
for (int i = 0; i < n; ++i)
{
cout<<"Colour of i from" <<i <<" "<<old_colors[i]<<":"<<h_degreeCount[i]<<endl;
}
hipMemset((void *)d_detectConflict, 0, (n)*sizeof(int));
hipMemcpy(d_degreeCount, h_degreeCount, n*sizeof(int), hipMemcpyHostToDevice);
conflictDetection<<<blocksPerGrid, threadsPerBlock>>>(d_vertexArray, d_neighbourArray, d_degreeCount, n, m, d_detectConflict);
gettimeofday(&endTime,&endZone);
startt = startTime.tv_sec*1000000+startTime.tv_usec;
endt = endTime.tv_sec*1000000+endTime.tv_usec;
overhead = (endt-startt)/1000000.0;
count1 = thrust::reduce(d_detectConflict_ptr, d_detectConflict_ptr + n);
cout<<"Count: "<<count1<<" "<<countnew<<endl;
cout<<"time taken is"<<overhead<<endl;
hipFree(d_neighbourArray);
hipFree(d_vertexArray);
hipFree(d_degreeCount);
hipFree(d_detectConflict);
hipDeviceReset();
return 0;
} |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | template<typename T>
__global__
void init_array_kernel(T *gpu_array, size_t n) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n;
i += blockDim.x * gridDim.x)
{
new (&gpu_array[i]) T();
}
}
template<class T, class TransformedT, class TransformationT>
__global__
void transform_kernel(
T *from_array, size_t n,
TransformedT *to_array,
TransformationT transform) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n;
i += blockDim.x * gridDim.x)
{
to_array[i] = transform(from_array[i], i);
}
}
template<typename T, typename Transformation>
__global__
void for_each_kernel(T *gpu_array, size_t n, Transformation fn) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n;
i += blockDim.x * gridDim.x)
{
fn(gpu_array[i], i);
}
}
template<typename T, typename Reduction>
__global__
void reduce_2step_kernel(
T *gpu_array, size_t n,
T *out,
Reduction fn, T initial_value=T{}) { /*
Log-reduction based from the one in the book "The CUDA Handbook" by
Nicholas Wilt.
*/
extern __shared__ T partials[];
const int tid = threadIdx.x;
auto reduced = initial_value;
for (int i = blockIdx.x * blockDim.x + tid;
i < n;
i += blockDim.x * gridDim.x) {
reduced = fn(reduced, gpu_array[i]);
}
partials[tid] = reduced;
__syncthreads();
for (int active_threads = blockDim.x / 2;
active_threads > 0;
active_threads /= 2) {
auto is_active_thread = tid < active_threads;
if (is_active_thread) {
partials[tid] = fn(partials[tid], partials[tid + active_threads]);
}
__syncthreads();
}
if (tid == 0) {
out[blockIdx.x] = partials[0];
}
}
/* -----------------------------------------------------------------------
These kernels are used and tested on the gpu_array and gpu_object classes.
*/ | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | template<typename T>
__global__
void init_array_kernel(T *gpu_array, size_t n) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n;
i += blockDim.x * gridDim.x)
{
new (&gpu_array[i]) T();
}
}
template<class T, class TransformedT, class TransformationT>
__global__
void transform_kernel(
T *from_array, size_t n,
TransformedT *to_array,
TransformationT transform) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n;
i += blockDim.x * gridDim.x)
{
to_array[i] = transform(from_array[i], i);
}
}
template<typename T, typename Transformation>
__global__
void for_each_kernel(T *gpu_array, size_t n, Transformation fn) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n;
i += blockDim.x * gridDim.x)
{
fn(gpu_array[i], i);
}
}
template<typename T, typename Reduction>
__global__
void reduce_2step_kernel(
T *gpu_array, size_t n,
T *out,
Reduction fn, T initial_value=T{}) { /*
Log-reduction based from the one in the book "The CUDA Handbook" by
Nicholas Wilt.
*/
extern __shared__ T partials[];
const int tid = threadIdx.x;
auto reduced = initial_value;
for (int i = blockIdx.x * blockDim.x + tid;
i < n;
i += blockDim.x * gridDim.x) {
reduced = fn(reduced, gpu_array[i]);
}
partials[tid] = reduced;
__syncthreads();
for (int active_threads = blockDim.x / 2;
active_threads > 0;
active_threads /= 2) {
auto is_active_thread = tid < active_threads;
if (is_active_thread) {
partials[tid] = fn(partials[tid], partials[tid + active_threads]);
}
__syncthreads();
}
if (tid == 0) {
out[blockIdx.x] = partials[0];
}
}
/* -----------------------------------------------------------------------
These kernels are used and tested on the gpu_array and gpu_object classes.
*/ | .file "tmpxft_0007addc_00000000-6_kernels.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2037:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2037:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | template<typename T>
__global__
void init_array_kernel(T *gpu_array, size_t n) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n;
i += blockDim.x * gridDim.x)
{
new (&gpu_array[i]) T();
}
}
template<class T, class TransformedT, class TransformationT>
__global__
void transform_kernel(
T *from_array, size_t n,
TransformedT *to_array,
TransformationT transform) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n;
i += blockDim.x * gridDim.x)
{
to_array[i] = transform(from_array[i], i);
}
}
template<typename T, typename Transformation>
__global__
void for_each_kernel(T *gpu_array, size_t n, Transformation fn) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n;
i += blockDim.x * gridDim.x)
{
fn(gpu_array[i], i);
}
}
template<typename T, typename Reduction>
__global__
void reduce_2step_kernel(
T *gpu_array, size_t n,
T *out,
Reduction fn, T initial_value=T{}) { /*
Log-reduction based from the one in the book "The CUDA Handbook" by
Nicholas Wilt.
*/
extern __shared__ T partials[];
const int tid = threadIdx.x;
auto reduced = initial_value;
for (int i = blockIdx.x * blockDim.x + tid;
i < n;
i += blockDim.x * gridDim.x) {
reduced = fn(reduced, gpu_array[i]);
}
partials[tid] = reduced;
__syncthreads();
for (int active_threads = blockDim.x / 2;
active_threads > 0;
active_threads /= 2) {
auto is_active_thread = tid < active_threads;
if (is_active_thread) {
partials[tid] = fn(partials[tid], partials[tid + active_threads]);
}
__syncthreads();
}
if (tid == 0) {
out[blockIdx.x] = partials[0];
}
}
/* -----------------------------------------------------------------------
These kernels are used and tested on the gpu_array and gpu_object classes.
*/ | #include <hip/hip_runtime.h>
template<typename T>
__global__
void init_array_kernel(T *gpu_array, size_t n) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n;
i += blockDim.x * gridDim.x)
{
new (&gpu_array[i]) T();
}
}
template<class T, class TransformedT, class TransformationT>
__global__
void transform_kernel(
T *from_array, size_t n,
TransformedT *to_array,
TransformationT transform) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n;
i += blockDim.x * gridDim.x)
{
to_array[i] = transform(from_array[i], i);
}
}
template<typename T, typename Transformation>
__global__
void for_each_kernel(T *gpu_array, size_t n, Transformation fn) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n;
i += blockDim.x * gridDim.x)
{
fn(gpu_array[i], i);
}
}
template<typename T, typename Reduction>
__global__
void reduce_2step_kernel(
T *gpu_array, size_t n,
T *out,
Reduction fn, T initial_value=T{}) { /*
Log-reduction based from the one in the book "The CUDA Handbook" by
Nicholas Wilt.
*/
extern __shared__ T partials[];
const int tid = threadIdx.x;
auto reduced = initial_value;
for (int i = blockIdx.x * blockDim.x + tid;
i < n;
i += blockDim.x * gridDim.x) {
reduced = fn(reduced, gpu_array[i]);
}
partials[tid] = reduced;
__syncthreads();
for (int active_threads = blockDim.x / 2;
active_threads > 0;
active_threads /= 2) {
auto is_active_thread = tid < active_threads;
if (is_active_thread) {
partials[tid] = fn(partials[tid], partials[tid + active_threads]);
}
__syncthreads();
}
if (tid == 0) {
out[blockIdx.x] = partials[0];
}
}
/* -----------------------------------------------------------------------
These kernels are used and tested on the gpu_array and gpu_object classes.
*/ |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
template<typename T>
__global__
void init_array_kernel(T *gpu_array, size_t n) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n;
i += blockDim.x * gridDim.x)
{
new (&gpu_array[i]) T();
}
}
template<class T, class TransformedT, class TransformationT>
__global__
void transform_kernel(
T *from_array, size_t n,
TransformedT *to_array,
TransformationT transform) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n;
i += blockDim.x * gridDim.x)
{
to_array[i] = transform(from_array[i], i);
}
}
template<typename T, typename Transformation>
__global__
void for_each_kernel(T *gpu_array, size_t n, Transformation fn) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n;
i += blockDim.x * gridDim.x)
{
fn(gpu_array[i], i);
}
}
template<typename T, typename Reduction>
__global__
void reduce_2step_kernel(
T *gpu_array, size_t n,
T *out,
Reduction fn, T initial_value=T{}) { /*
Log-reduction based from the one in the book "The CUDA Handbook" by
Nicholas Wilt.
*/
extern __shared__ T partials[];
const int tid = threadIdx.x;
auto reduced = initial_value;
for (int i = blockIdx.x * blockDim.x + tid;
i < n;
i += blockDim.x * gridDim.x) {
reduced = fn(reduced, gpu_array[i]);
}
partials[tid] = reduced;
__syncthreads();
for (int active_threads = blockDim.x / 2;
active_threads > 0;
active_threads /= 2) {
auto is_active_thread = tid < active_threads;
if (is_active_thread) {
partials[tid] = fn(partials[tid], partials[tid + active_threads]);
}
__syncthreads();
}
if (tid == 0) {
out[blockIdx.x] = partials[0];
}
}
/* -----------------------------------------------------------------------
These kernels are used and tested on the gpu_array and gpu_object classes.
*/ | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
template<typename T>
__global__
void init_array_kernel(T *gpu_array, size_t n) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n;
i += blockDim.x * gridDim.x)
{
new (&gpu_array[i]) T();
}
}
template<class T, class TransformedT, class TransformationT>
__global__
void transform_kernel(
T *from_array, size_t n,
TransformedT *to_array,
TransformationT transform) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n;
i += blockDim.x * gridDim.x)
{
to_array[i] = transform(from_array[i], i);
}
}
template<typename T, typename Transformation>
__global__
void for_each_kernel(T *gpu_array, size_t n, Transformation fn) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n;
i += blockDim.x * gridDim.x)
{
fn(gpu_array[i], i);
}
}
template<typename T, typename Reduction>
__global__
void reduce_2step_kernel(
T *gpu_array, size_t n,
T *out,
Reduction fn, T initial_value=T{}) { /*
Log-reduction based from the one in the book "The CUDA Handbook" by
Nicholas Wilt.
*/
extern __shared__ T partials[];
const int tid = threadIdx.x;
auto reduced = initial_value;
for (int i = blockIdx.x * blockDim.x + tid;
i < n;
i += blockDim.x * gridDim.x) {
reduced = fn(reduced, gpu_array[i]);
}
partials[tid] = reduced;
__syncthreads();
for (int active_threads = blockDim.x / 2;
active_threads > 0;
active_threads /= 2) {
auto is_active_thread = tid < active_threads;
if (is_active_thread) {
partials[tid] = fn(partials[tid], partials[tid + active_threads]);
}
__syncthreads();
}
if (tid == 0) {
out[blockIdx.x] = partials[0];
}
}
/* -----------------------------------------------------------------------
These kernels are used and tested on the gpu_array and gpu_object classes.
*/ | .text
.file "kernels.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0007addc_00000000-6_kernels.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2037:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2037:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "kernels.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void sync_conv_groups() { } | code for sm_80
Function : _Z16sync_conv_groupsv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void sync_conv_groups() { } | .file "tmpxft_000724d1_00000000-6_sync_conv_groups.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z35__device_stub__Z16sync_conv_groupsvv
.type _Z35__device_stub__Z16sync_conv_groupsvv, @function
_Z35__device_stub__Z16sync_conv_groupsvv:
.LFB2051:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z16sync_conv_groupsv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z35__device_stub__Z16sync_conv_groupsvv, .-_Z35__device_stub__Z16sync_conv_groupsvv
.globl _Z16sync_conv_groupsv
.type _Z16sync_conv_groupsv, @function
_Z16sync_conv_groupsv:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z16sync_conv_groupsvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z16sync_conv_groupsv, .-_Z16sync_conv_groupsv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z16sync_conv_groupsv"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z16sync_conv_groupsv(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void sync_conv_groups() { } | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void sync_conv_groups() { } |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void sync_conv_groups() { } | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z16sync_conv_groupsv
.globl _Z16sync_conv_groupsv
.p2align 8
.type _Z16sync_conv_groupsv,@function
_Z16sync_conv_groupsv:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16sync_conv_groupsv
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 0
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 0
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z16sync_conv_groupsv, .Lfunc_end0-_Z16sync_conv_groupsv
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args: []
.group_segment_fixed_size: 0
.kernarg_segment_align: 4
.kernarg_segment_size: 0
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16sync_conv_groupsv
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z16sync_conv_groupsv.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void sync_conv_groups() { } | .text
.file "sync_conv_groups.hip"
.globl _Z31__device_stub__sync_conv_groupsv # -- Begin function _Z31__device_stub__sync_conv_groupsv
.p2align 4, 0x90
.type _Z31__device_stub__sync_conv_groupsv,@function
_Z31__device_stub__sync_conv_groupsv: # @_Z31__device_stub__sync_conv_groupsv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z16sync_conv_groupsv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z31__device_stub__sync_conv_groupsv, .Lfunc_end0-_Z31__device_stub__sync_conv_groupsv
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16sync_conv_groupsv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z16sync_conv_groupsv,@object # @_Z16sync_conv_groupsv
.section .rodata,"a",@progbits
.globl _Z16sync_conv_groupsv
.p2align 3, 0x0
_Z16sync_conv_groupsv:
.quad _Z31__device_stub__sync_conv_groupsv
.size _Z16sync_conv_groupsv, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z16sync_conv_groupsv"
.size .L__unnamed_1, 22
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__sync_conv_groupsv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z16sync_conv_groupsv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z16sync_conv_groupsv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z16sync_conv_groupsv
.globl _Z16sync_conv_groupsv
.p2align 8
.type _Z16sync_conv_groupsv,@function
_Z16sync_conv_groupsv:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16sync_conv_groupsv
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 0
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 0
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z16sync_conv_groupsv, .Lfunc_end0-_Z16sync_conv_groupsv
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args: []
.group_segment_fixed_size: 0
.kernarg_segment_align: 4
.kernarg_segment_size: 0
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16sync_conv_groupsv
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z16sync_conv_groupsv.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000724d1_00000000-6_sync_conv_groups.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z35__device_stub__Z16sync_conv_groupsvv
.type _Z35__device_stub__Z16sync_conv_groupsvv, @function
_Z35__device_stub__Z16sync_conv_groupsvv:
.LFB2051:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z16sync_conv_groupsv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z35__device_stub__Z16sync_conv_groupsvv, .-_Z35__device_stub__Z16sync_conv_groupsvv
.globl _Z16sync_conv_groupsv
.type _Z16sync_conv_groupsv, @function
_Z16sync_conv_groupsv:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z16sync_conv_groupsvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z16sync_conv_groupsv, .-_Z16sync_conv_groupsv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z16sync_conv_groupsv"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z16sync_conv_groupsv(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "sync_conv_groups.hip"
.globl _Z31__device_stub__sync_conv_groupsv # -- Begin function _Z31__device_stub__sync_conv_groupsv
.p2align 4, 0x90
.type _Z31__device_stub__sync_conv_groupsv,@function
_Z31__device_stub__sync_conv_groupsv: # @_Z31__device_stub__sync_conv_groupsv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z16sync_conv_groupsv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z31__device_stub__sync_conv_groupsv, .Lfunc_end0-_Z31__device_stub__sync_conv_groupsv
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16sync_conv_groupsv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z16sync_conv_groupsv,@object # @_Z16sync_conv_groupsv
.section .rodata,"a",@progbits
.globl _Z16sync_conv_groupsv
.p2align 3, 0x0
_Z16sync_conv_groupsv:
.quad _Z31__device_stub__sync_conv_groupsv
.size _Z16sync_conv_groupsv, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z16sync_conv_groupsv"
.size .L__unnamed_1, 22
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__sync_conv_groupsv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z16sync_conv_groupsv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cstdio>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#include <cuda_runtime_api.h>
#define restrict __restrict__
using namespace std;
int error(const char *msg) {
fprintf(stderr, "%s\n", msg);
exit(1);
}
void cuda_check(cudaError_t err, const char *msg)
{
if (err != cudaSuccess) {
fprintf(stderr, "%s: errore %d - %s\n",
msg, err, cudaGetErrorString(err));
exit(1);
}
}
//inizializzazione su CPU con numeri random
void init_random(int vett[],int nels,int max)
{
srand(time(NULL));
for(int i=0;i<nels;++i)
vett[i]=rand()% max+ 1;
}
//verifica con numeri random
bool verify_random(const int* scan_out, int nels)
{
for(int i=0;i<nels-1;++i)
if(scan_out[i]>scan_out[i+1])
{
fprintf(stderr, "errore tra le posizioni %d e %d \n", i,i+1);
return false;
}
return true;
}
//verifica con numeri ordinati al contrario partendo da nels fino ad arrivare ad 1
bool verify(const int* scan_out, int nels)
{
int err=0;
for (int i = 0; i < nels; ++i) {
if(i+1!=scan_out[i])
{
fprintf(stderr, "verify,idx=%d: val_scan:%d \n", i,scan_out[i]);
err=1;
}
}
if(err)
return false;
return true;
}
//inizializzazione su GPU
__global__ void init(int *vec, int nels)
{
int idx = threadIdx.x + blockDim.x*blockIdx.x;
if (idx < nels)
vec[idx] = nels-idx;
}
extern __shared__ int4 shared[];
__device__ void scan_delle_code(int4 coda)
{
__syncthreads();
shared[threadIdx.x] = coda;
for (int offset = 1; offset < blockDim.x; offset *= 2) {
__syncthreads();
if (threadIdx.x >= offset)
{
coda.x += shared[threadIdx.x - offset].x;
coda.y += shared[threadIdx.x - offset].y;
coda.z += shared[threadIdx.x - offset].z;
coda.w += shared[threadIdx.x - offset].w;
}
__syncthreads();
shared[threadIdx.x] = coda;
}
__syncthreads();
}
//primo scan
__global__ void scan_step1(int4 * restrict out0,int4 * restrict out1,int4 * restrict out2,int4 * restrict out3, const int4 * restrict in, int nels /* numero di quartine */, int * restrict code0,int * restrict code1,int * restrict code2,int * restrict code3, int nbit)
{
int els_per_sezione = (nels + gridDim.x - 1)/gridDim.x;
int idx = threadIdx.x + blockIdx.x*els_per_sezione;
int4 val,val0,val1,val2,val3;
int4 correzione_dal_blocco_precedente = make_int4(0,0,0,0);
int numero_cicli = (els_per_sezione + blockDim.x - 1)/blockDim.x;
int elemento_limite = min(els_per_sezione*(blockIdx.x+1), nels);
for (int ciclo = 0; ciclo < numero_cicli;++ciclo, idx += blockDim.x)
{
val = (idx < elemento_limite ? in[idx] : make_int4(0, 0, 0, 0));
val0= make_int4(0,0,0,0);
val1= make_int4(0,0,0,0);
val2= make_int4(0,0,0,0);
val3= make_int4(0,0,0,0);
//basta fare solo 3 controlli sui bit,il quarto cofronto (00 per comodità) è complementare!
//controllo sul primo valore della quartina (x)
if(((val.x>>nbit)&3)==1)
val1.x=1;
else if(((val.x>>nbit)&3)==2)
val2.x=1;
else if(((val.x>>nbit)&3)==3)
val3.x=1;
else
val0.x=1;
//controllo sulla seconda componente della quartina (y)
if(((val.y>>nbit)&3)==1)
val1.y=1;
else if(((val.y>>nbit)&3)==2)
val2.y=1;
else if(((val.y>>nbit)&3)==3)
val3.y=1;
else
val0.y=1;
//controllo sulla terza componente della quartina (z)
if(((val.z>>nbit)&3)==1)
val1.z=1;
else if(((val.z>>nbit)&3)==2)
val2.z=1;
else if(((val.z>>nbit)&3)==3)
val3.z=1;
else
val0.z=1;
//controllo sulla quarta componente della quartina (w)
if(((val.w>>nbit)&3)==1)
val1.w=1;
else if(((val.w>>nbit)&3)==2)
val2.w=1;
else if(((val.w>>nbit)&3)==3)
val3.w=1;
else
val0.w=1;
/* scan delle componenti dei val */
val0.y += val0.x;
val0.z += val0.y;
val0.w += val0.z;
val1.y += val1.x;
val1.z += val1.y;
val1.w += val1.z;
val2.y += val2.x;
val2.z += val2.y;
val2.w += val2.z;
val3.y += val3.x;
val3.z += val3.y;
val3.w += val3.z;
int4 coda=make_int4(val0.w,val1.w,val2.w,val3.w);
scan_delle_code(coda);
int4 correzione_dai_thread_precedenti = make_int4(0,0,0,0);
if (threadIdx.x > 0)
correzione_dai_thread_precedenti = shared[threadIdx.x-1];
int correzione_totale = correzione_dal_blocco_precedente.x + correzione_dai_thread_precedenti.x;
val0.x += correzione_totale;
val0.y += correzione_totale;
val0.z += correzione_totale;
val0.w += correzione_totale;
correzione_totale = correzione_dal_blocco_precedente.y + correzione_dai_thread_precedenti.y;
val1.x += correzione_totale;
val1.y += correzione_totale;
val1.z += correzione_totale;
val1.w += correzione_totale;
correzione_totale = correzione_dal_blocco_precedente.z + correzione_dai_thread_precedenti.z;
val2.x += correzione_totale;
val2.y += correzione_totale;
val2.z += correzione_totale;
val2.w += correzione_totale;
correzione_totale = correzione_dal_blocco_precedente.w + correzione_dai_thread_precedenti.w;
val3.x += correzione_totale;
val3.y += correzione_totale;
val3.z += correzione_totale;
val3.w += correzione_totale;
correzione_dal_blocco_precedente.x += shared[blockDim.x-1].x; //correzione di 00 (0)
correzione_dal_blocco_precedente.y += shared[blockDim.x-1].y; //correzione di 01 (1)
correzione_dal_blocco_precedente.z += shared[blockDim.x-1].z; //correzione di 10 (2)
correzione_dal_blocco_precedente.w += shared[blockDim.x-1].w; //correzione di 11 (3)
if (idx < nels)
{
out0[idx] = val0;
out1[idx] = val1;
out2[idx] = val2;
out3[idx] = val3;
}
}
if (gridDim.x > 1 && threadIdx.x == blockDim.x - 1)
{
code0[blockIdx.x] = val0.w;
code1[blockIdx.x] = val1.w;
code2[blockIdx.x] = val2.w;
code3[blockIdx.x] = val3.w;
}
}
//secondo scan fatto sulo sui vettori di code
__global__ void scan_step2(int4 * restrict code0, int4 * restrict code1, int4 * restrict code2, int4 * restrict code3, int nels /* numero di quartine */)
{
int els_per_sezione = (nels + gridDim.x - 1)/gridDim.x;
int idx = threadIdx.x + blockIdx.x*els_per_sezione;
int4 val0,val1,val2,val3;
int4 correzione_dal_blocco_precedente = make_int4(0,0,0,0);
int numero_cicli = (els_per_sezione + blockDim.x - 1)/blockDim.x;
int elemento_limite = min(els_per_sezione*(blockIdx.x+1), nels);
for (int ciclo = 0; ciclo < numero_cicli;++ciclo, idx += blockDim.x)
{
val0 = (idx < elemento_limite ? code0[idx] : make_int4(0, 0, 0, 0));
val1 = (idx < elemento_limite ? code1[idx] : make_int4(0, 0, 0, 0));
val2 = (idx < elemento_limite ? code2[idx] : make_int4(0, 0, 0, 0));
val3 = (idx < elemento_limite ? code3[idx] : make_int4(0, 0, 0, 0));
/* scan delle componenti di val */
val0.y += val0.x;
val0.z += val0.y;
val0.w += val0.z;
val1.y += val1.x;
val1.z += val1.y;
val1.w += val1.z;
val2.y += val2.x;
val2.z += val2.y;
val2.w += val2.z;
val3.y += val3.x;
val3.z += val3.y;
val3.w += val3.z;
//da modificare anche scan_delle_code
int4 coda=make_int4(val0.w,val1.w,val2.w,val3.w);
scan_delle_code(coda);
int4 correzione_dai_thread_precedenti = make_int4(0,0,0,0);
if (threadIdx.x > 0)
correzione_dai_thread_precedenti =
shared[threadIdx.x-1];
int correzione_totale = correzione_dal_blocco_precedente.x + correzione_dai_thread_precedenti.x;
val0.x += correzione_totale;
val0.y += correzione_totale;
val0.z += correzione_totale;
val0.w += correzione_totale;
correzione_totale = correzione_dal_blocco_precedente.y + correzione_dai_thread_precedenti.y;
val1.x += correzione_totale;
val1.y += correzione_totale;
val1.z += correzione_totale;
val1.w += correzione_totale;
correzione_totale = correzione_dal_blocco_precedente.z + correzione_dai_thread_precedenti.z;
val2.x += correzione_totale;
val2.y += correzione_totale;
val2.z += correzione_totale;
val2.w += correzione_totale;
correzione_totale = correzione_dal_blocco_precedente.w + correzione_dai_thread_precedenti.w;
val3.x += correzione_totale;
val3.y += correzione_totale;
val3.z += correzione_totale;
val3.w += correzione_totale;
correzione_dal_blocco_precedente.x += shared[blockDim.x-1].x; //correzione di 00 (0)
correzione_dal_blocco_precedente.y += shared[blockDim.x-1].y; //correzione di 01 (1)
correzione_dal_blocco_precedente.z += shared[blockDim.x-1].z; //correzione di 10 (2)
correzione_dal_blocco_precedente.w += shared[blockDim.x-1].w; //correzione di 11 (3)
if (idx < nels)
{
code0[idx] = val0;
code1[idx] = val1;
code2[idx] = val2;
code3[idx] = val3;
}
}
}
__global__ void fixup(int4 * restrict scan0,int4 * restrict scan1,int4 * restrict scan2,int4 * restrict scan3,int nels ,const int * restrict code0,const int * restrict code1,const int * restrict code2,const int * restrict code3,const int4* restrict in,int nbit,int4* max)
{
int els_per_sezione = (nels + gridDim.x - 1)/gridDim.x;
int idx = threadIdx.x + blockIdx.x*els_per_sezione;
int4 correzione_dal_blocco_precedente =make_int4(0,0,0,0);
if(blockIdx.x>0)
{
correzione_dal_blocco_precedente.x = code0[blockIdx.x - 1];
correzione_dal_blocco_precedente.y = code1[blockIdx.x - 1];
correzione_dal_blocco_precedente.z = code2[blockIdx.x - 1];
correzione_dal_blocco_precedente.w = code3[blockIdx.x - 1];
}
int numero_cicli = (els_per_sezione + blockDim.x - 1)/blockDim.x;
int elemento_limite = min(els_per_sezione*(blockIdx.x+1), nels);
for (int ciclo = 0; ciclo < numero_cicli;++ciclo, idx += blockDim.x) {
if (idx < elemento_limite)
{
int4 val0 = scan0[idx], val1=scan1[idx], val2=scan2[idx], val3=scan3[idx];
int4 val_in=in[idx];
if(idx==nels-1)
{ //salvataggio in memoria globale degli ultimi elementi degli scan inclusivi
(*max).x=val0.w+correzione_dal_blocco_precedente.x;
(*max).y=val1.w+correzione_dal_blocco_precedente.y;
(*max).z=val2.w+correzione_dal_blocco_precedente.z;
(*max).w=val3.w+correzione_dal_blocco_precedente.w;
}
//trasformazione degli scan da inclusivi ad esclusivi
val0.x += correzione_dal_blocco_precedente.x - ((((val_in.x>>nbit)&3)==0)?1:0);
val0.y += correzione_dal_blocco_precedente.x - ((((val_in.y>>nbit)&3)==0)?1:0);
val0.z += correzione_dal_blocco_precedente.x - ((((val_in.z>>nbit)&3)==0)?1:0);
val0.w += correzione_dal_blocco_precedente.x - ((((val_in.w>>nbit)&3)==0)?1:0);
val1.x += correzione_dal_blocco_precedente.y - ((((val_in.x>>nbit)&3)==1)?1:0);
val1.y += correzione_dal_blocco_precedente.y - ((((val_in.y>>nbit)&3)==1)?1:0);
val1.z += correzione_dal_blocco_precedente.y - ((((val_in.z>>nbit)&3)==1)?1:0);
val1.w += correzione_dal_blocco_precedente.y - ((((val_in.w>>nbit)&3)==1)?1:0);
val2.x += correzione_dal_blocco_precedente.z - ((((val_in.x>>nbit)&3)==2)?1:0);
val2.y += correzione_dal_blocco_precedente.z - ((((val_in.y>>nbit)&3)==2)?1:0);
val2.z += correzione_dal_blocco_precedente.z - ((((val_in.z>>nbit)&3)==2)?1:0);
val2.w += correzione_dal_blocco_precedente.z - ((((val_in.w>>nbit)&3)==2)?1:0);
val3.x += correzione_dal_blocco_precedente.w - ((((val_in.x>>nbit)&3)==3)?1:0);
val3.y += correzione_dal_blocco_precedente.w - ((((val_in.y>>nbit)&3)==3)?1:0);
val3.z += correzione_dal_blocco_precedente.w - ((((val_in.z>>nbit)&3)==3)?1:0);
val3.w += correzione_dal_blocco_precedente.w - ((((val_in.w>>nbit)&3)==3)?1:0);
scan0[idx] = val0;
scan1[idx] = val1;
scan2[idx] = val2;
scan3[idx] = val3;
}
}
}
//kernel adibito al riordino dei vettori utilizzando 2 bit
__global__ void reorder(const int4* restrict scan0,const int4* restrict scan1,const int4* restrict scan2,const int4* restrict scan3,const int4* restrict in, int* restrict out,int nels,int4* max)
{
int els_per_sezione = (nels + gridDim.x - 1)/gridDim.x;
int idx = threadIdx.x + blockIdx.x*els_per_sezione;
int numero_cicli = (els_per_sezione +blockDim.x -1)/blockDim.x;
int elemento_limite = min(els_per_sezione*(blockIdx.x+1), nels);
int4 offset_max=make_int4((*max).x,(*max).y,(*max).z,(*max).w);
int4 val_scan_succ;
//scan dei max
int4 max_scan=make_int4(offset_max.x,offset_max.y,offset_max.z,offset_max.w);
offset_max.x=0; //offset di 00 (0)
offset_max.y=max_scan.x; //offset di 01 (1)
offset_max.z=offset_max.y + max_scan.y; //offset di 10 (2)
offset_max.w=offset_max.z + max_scan.z; //offset di 11 (3)
//inizio ciclo di reorder
for (int ciclo = 0; ciclo < numero_cicli;++ciclo, idx += blockDim.x)
{
if (idx < elemento_limite)
{
int4 val_num=in[idx], val_scan0=scan0[idx], val_scan1=scan1[idx], val_scan2=scan2[idx], val_scan3=scan3[idx];
// confronto 1° elemento con 2° elemento della quartina
if(val_scan0.x!=val_scan0.y)
out[val_scan0.x+offset_max.x]=val_num.x;
else if(val_scan1.x!=val_scan1.y)
out[val_scan1.x+offset_max.y]=val_num.x;
else if(val_scan2.x!=val_scan2.y)
out[val_scan2.x+offset_max.z]=val_num.x;
else
out[val_scan3.x+offset_max.w]=val_num.x;
// confronto 2° elemento con 3° elemento della quartina
if(val_scan0.y!=val_scan0.z)
out[val_scan0.y+offset_max.x]=val_num.y;
else if(val_scan1.y!=val_scan1.z)
out[val_scan1.y+offset_max.y]=val_num.y;
else if(val_scan2.y!=val_scan2.z)
out[val_scan2.y+offset_max.z]=val_num.y;
else
out[val_scan3.y+offset_max.w]=val_num.y;
// confronto 3° elemento con 4° elemento della quartina
if(val_scan0.z!=val_scan0.w)
out[val_scan0.z+offset_max.x]=val_num.z;
else if(val_scan1.z!=val_scan1.w)
out[val_scan1.z+offset_max.y]=val_num.z;
else if(val_scan2.z!=val_scan2.w)
out[val_scan2.z+offset_max.z]=val_num.z;
else
out[val_scan3.z+offset_max.w]=val_num.z;
//confronto 4° elemento con 1° elemento della quartina dello scan successivo
if(idx!=nels-1)
{
//scan3[idx+1] puo non essere preso, visto che non viene mai usato
val_scan_succ=make_int4(scan0[idx+1].x,scan1[idx+1].x,scan2[idx+1].x,0); // primi valori delle quartine successive degli scan
if(val_scan0.w!=val_scan_succ.x)
out[val_scan0.w + offset_max.x]=val_num.w;
else if(val_scan1.w!=val_scan_succ.y)
out[val_scan1.w + offset_max.y]=val_num.w;
else if(val_scan2.w!=val_scan_succ.z)
out[val_scan2.w + offset_max.z]=val_num.w;
else
out[val_scan3.w + offset_max.w]=val_num.w;
}
else
{
if(val_scan0.w!=max_scan.x)
out[val_scan0.w + offset_max.x]=val_num.w;
else if(val_scan1.w!=max_scan.y)
out[val_scan1.w + offset_max.y]=val_num.w;
else if(val_scan2.w!=max_scan.z)
out[val_scan2.w + offset_max.z]=val_num.w;
else
out[val_scan3.w + offset_max.w]=val_num.w;
}
}
}
}
int main(int argc, char *argv[])
{
if (argc < 4)
error("sintassi radix_sort: numels thread_per_blocco numero_blocchi_scan valore_massimo");
int nels = atoi(argv[1]); /* numero di elementi */
if (nels <= 0)
error("il numero di elementi deve essere positivo");
if (nels & 3)
error("il numero di elementi deve essere multiplo di 4");
int numThreads = atoi(argv[2]); /* local work size */
if (numThreads <= 0)
error("il numero di thread per blocco deve essere positivo");
int numBlocksScan = atoi(argv[3]); /* numero blocchi scan */
if (numBlocksScan <= 0)
error("il numero di blocchi deve essere positivo");
int numMax = atoi(argv[4]); /* numero blocchi scan */
if (numMax <= 0)
error("il valore massimo deve essere positivo");
//inizializzazione dei vettori
const size_t memsize = sizeof(int)*nels;
int4 *d_v1, *d_scan0,*d_scan1,*d_scan2,*d_scan3, *d_code0,*d_code1,*d_code2,*d_code3,*d_out,*tmp;
int numbit;
int4 *d_max;
//calolo dei cicli da fare avendo il massimo
int cicli=int(log(numMax)/log(2)) + 1;
printf("numero cicli da fare=%d\n",cicli/2);
//allocazione dei vettori su GPU
cudaError_t err = cudaMalloc(&d_v1, memsize);
cuda_check(err, "malloc v1");
err= cudaMalloc(&d_max,sizeof(int4));
cuda_check(err,"malloc max");
err = cudaMalloc(&d_scan0, memsize);
cuda_check(err, "malloc scan0");
err = cudaMalloc(&d_scan1, memsize);
cuda_check(err, "malloc scan1");
err = cudaMalloc(&d_scan2, memsize);
cuda_check(err, "malloc scan2");
err = cudaMalloc(&d_scan3, memsize);
cuda_check(err, "malloc scan3");
err = cudaMalloc(&d_code0, numBlocksScan*sizeof(int));
cuda_check(err, "malloc code0");
err = cudaMalloc(&d_code1, numBlocksScan*sizeof(int));
cuda_check(err, "malloc code1");
err = cudaMalloc(&d_code2, numBlocksScan*sizeof(int));
cuda_check(err, "malloc code2");
err = cudaMalloc(&d_code3, numBlocksScan*sizeof(int));
cuda_check(err, "malloc code3");
err = cudaMalloc(&d_out, memsize);
cuda_check(err, "malloc out");
//allocazione su CPU
int *vout = (int*)malloc(memsize);
if (!vout)
error("alloc vscan");
//inizializzazione su CPU di numeri random con massimo possibile numMax
init_random(vout,nels,numMax);
/*
//inizializzazione su GPU di numeri decrescenti a partire da nels
int numBlocks = (nels + numThreads - 1)/numThreads;
init<<<numBlocks, numThreads>>>((int*)d_v1, nels);
*/
//prova ad otimizzare la cache
cudaFuncSetCacheConfig(scan_step1, cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(scan_step2, cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(fixup, cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(reorder, cudaFuncCachePreferL1);
err = cudaMemcpy(d_v1,vout,memsize, cudaMemcpyHostToDevice);
cuda_check(err, "memcpy vett su GPU");
//creazione eventi
cudaEvent_t before_scan, after_scan;
err = cudaEventCreate(&before_scan);
cuda_check(err, "create event before");
err = cudaEventCreate(&after_scan);
cuda_check(err, "create event after");
cudaEventRecord(before_scan);
for(numbit=0;numbit<cicli;numbit+=2)
{
//pulizia dei vettori di code
err= cudaMemset (d_code0,0, numBlocksScan*sizeof(int));
cuda_check(err, "memset0");
err= cudaMemset (d_code1,0, numBlocksScan*sizeof(int));
cuda_check(err, "memset1");
err= cudaMemset (d_code2,0, numBlocksScan*sizeof(int));
cuda_check(err, "memset2");
err= cudaMemset (d_code3,0, numBlocksScan*sizeof(int));
cuda_check(err, "memset3");
scan_step1<<<numBlocksScan, numThreads, numThreads*sizeof(int)*4>>>
(d_scan0,d_scan1,d_scan2,d_scan3, d_v1, nels/4, (int*)d_code0,(int*)d_code1,(int*)d_code2,(int*)d_code3, numbit);
scan_step2<<<1, numThreads, numThreads*sizeof(int)*4>>>
(d_code0, d_code1,d_code2,d_code3, numBlocksScan/4);
fixup<<<numBlocksScan, numThreads>>>(d_scan0,d_scan1,d_scan2,d_scan3, nels/4, (int*)d_code0,(int*)d_code1,(int*)d_code2,(int*)d_code3,d_v1,numbit,d_max);
reorder<<<numBlocksScan,numThreads>>>(d_scan0,d_scan1,d_scan2,d_scan3,d_v1,(int*)d_out,nels/4,d_max);
//scambio deii puntatori d_out e d_v1
if(numbit+2 <cicli)
{
tmp=d_v1;
d_v1=d_out;
d_out=tmp;
}
}
cudaEventRecord(after_scan);
err = cudaEventSynchronize(after_scan);
cuda_check(err, "after scan sznc");
float runtime_ms;
cudaEventElapsedTime(&runtime_ms, before_scan, after_scan);
printf("scan runtime: %.4g ms\n", runtime_ms);
err = cudaMemcpy(vout, d_out, memsize, cudaMemcpyDeviceToHost);
cuda_check(err, "memcpy");
printf("\n\n");
verify_random(vout,nels)?printf("Ordinamento riuscito!\n"):printf("Ordinamento non riuscito!\n");
if(nels <=32)
for(int i=0;i<nels;++i)
printf("%d ",vout[i]);
printf("\n");
} | #include <hip/hip_runtime.h>
#include <cstdio>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime_api.h>
#define restrict __restrict__
using namespace std;
int error(const char *msg) {
fprintf(stderr, "%s\n", msg);
exit(1);
}
void cuda_check(hipError_t err, const char *msg)
{
if (err != hipSuccess) {
fprintf(stderr, "%s: errore %d - %s\n",
msg, err, hipGetErrorString(err));
exit(1);
}
}
//inizializzazione su CPU con numeri random
void init_random(int vett[],int nels,int max)
{
srand(time(NULL));
for(int i=0;i<nels;++i)
vett[i]=rand()% max+ 1;
}
//verifica con numeri random
bool verify_random(const int* scan_out, int nels)
{
for(int i=0;i<nels-1;++i)
if(scan_out[i]>scan_out[i+1])
{
fprintf(stderr, "errore tra le posizioni %d e %d \n", i,i+1);
return false;
}
return true;
}
//verifica con numeri ordinati al contrario partendo da nels fino ad arrivare ad 1
bool verify(const int* scan_out, int nels)
{
int err=0;
for (int i = 0; i < nels; ++i) {
if(i+1!=scan_out[i])
{
fprintf(stderr, "verify,idx=%d: val_scan:%d \n", i,scan_out[i]);
err=1;
}
}
if(err)
return false;
return true;
}
//inizializzazione su GPU
__global__ void init(int *vec, int nels)
{
int idx = threadIdx.x + blockDim.x*blockIdx.x;
if (idx < nels)
vec[idx] = nels-idx;
}
extern __shared__ int4 shared[];
__device__ void scan_delle_code(int4 coda)
{
__syncthreads();
shared[threadIdx.x] = coda;
for (int offset = 1; offset < blockDim.x; offset *= 2) {
__syncthreads();
if (threadIdx.x >= offset)
{
coda.x += shared[threadIdx.x - offset].x;
coda.y += shared[threadIdx.x - offset].y;
coda.z += shared[threadIdx.x - offset].z;
coda.w += shared[threadIdx.x - offset].w;
}
__syncthreads();
shared[threadIdx.x] = coda;
}
__syncthreads();
}
//primo scan
__global__ void scan_step1(int4 * restrict out0,int4 * restrict out1,int4 * restrict out2,int4 * restrict out3, const int4 * restrict in, int nels /* numero di quartine */, int * restrict code0,int * restrict code1,int * restrict code2,int * restrict code3, int nbit)
{
int els_per_sezione = (nels + gridDim.x - 1)/gridDim.x;
int idx = threadIdx.x + blockIdx.x*els_per_sezione;
int4 val,val0,val1,val2,val3;
int4 correzione_dal_blocco_precedente = make_int4(0,0,0,0);
int numero_cicli = (els_per_sezione + blockDim.x - 1)/blockDim.x;
int elemento_limite = min(els_per_sezione*(blockIdx.x+1), nels);
for (int ciclo = 0; ciclo < numero_cicli;++ciclo, idx += blockDim.x)
{
val = (idx < elemento_limite ? in[idx] : make_int4(0, 0, 0, 0));
val0= make_int4(0,0,0,0);
val1= make_int4(0,0,0,0);
val2= make_int4(0,0,0,0);
val3= make_int4(0,0,0,0);
//basta fare solo 3 controlli sui bit,il quarto cofronto (00 per comodità) è complementare!
//controllo sul primo valore della quartina (x)
if(((val.x>>nbit)&3)==1)
val1.x=1;
else if(((val.x>>nbit)&3)==2)
val2.x=1;
else if(((val.x>>nbit)&3)==3)
val3.x=1;
else
val0.x=1;
//controllo sulla seconda componente della quartina (y)
if(((val.y>>nbit)&3)==1)
val1.y=1;
else if(((val.y>>nbit)&3)==2)
val2.y=1;
else if(((val.y>>nbit)&3)==3)
val3.y=1;
else
val0.y=1;
//controllo sulla terza componente della quartina (z)
if(((val.z>>nbit)&3)==1)
val1.z=1;
else if(((val.z>>nbit)&3)==2)
val2.z=1;
else if(((val.z>>nbit)&3)==3)
val3.z=1;
else
val0.z=1;
//controllo sulla quarta componente della quartina (w)
if(((val.w>>nbit)&3)==1)
val1.w=1;
else if(((val.w>>nbit)&3)==2)
val2.w=1;
else if(((val.w>>nbit)&3)==3)
val3.w=1;
else
val0.w=1;
/* scan delle componenti dei val */
val0.y += val0.x;
val0.z += val0.y;
val0.w += val0.z;
val1.y += val1.x;
val1.z += val1.y;
val1.w += val1.z;
val2.y += val2.x;
val2.z += val2.y;
val2.w += val2.z;
val3.y += val3.x;
val3.z += val3.y;
val3.w += val3.z;
int4 coda=make_int4(val0.w,val1.w,val2.w,val3.w);
scan_delle_code(coda);
int4 correzione_dai_thread_precedenti = make_int4(0,0,0,0);
if (threadIdx.x > 0)
correzione_dai_thread_precedenti = shared[threadIdx.x-1];
int correzione_totale = correzione_dal_blocco_precedente.x + correzione_dai_thread_precedenti.x;
val0.x += correzione_totale;
val0.y += correzione_totale;
val0.z += correzione_totale;
val0.w += correzione_totale;
correzione_totale = correzione_dal_blocco_precedente.y + correzione_dai_thread_precedenti.y;
val1.x += correzione_totale;
val1.y += correzione_totale;
val1.z += correzione_totale;
val1.w += correzione_totale;
correzione_totale = correzione_dal_blocco_precedente.z + correzione_dai_thread_precedenti.z;
val2.x += correzione_totale;
val2.y += correzione_totale;
val2.z += correzione_totale;
val2.w += correzione_totale;
correzione_totale = correzione_dal_blocco_precedente.w + correzione_dai_thread_precedenti.w;
val3.x += correzione_totale;
val3.y += correzione_totale;
val3.z += correzione_totale;
val3.w += correzione_totale;
correzione_dal_blocco_precedente.x += shared[blockDim.x-1].x; //correzione di 00 (0)
correzione_dal_blocco_precedente.y += shared[blockDim.x-1].y; //correzione di 01 (1)
correzione_dal_blocco_precedente.z += shared[blockDim.x-1].z; //correzione di 10 (2)
correzione_dal_blocco_precedente.w += shared[blockDim.x-1].w; //correzione di 11 (3)
if (idx < nels)
{
out0[idx] = val0;
out1[idx] = val1;
out2[idx] = val2;
out3[idx] = val3;
}
}
if (gridDim.x > 1 && threadIdx.x == blockDim.x - 1)
{
code0[blockIdx.x] = val0.w;
code1[blockIdx.x] = val1.w;
code2[blockIdx.x] = val2.w;
code3[blockIdx.x] = val3.w;
}
}
//secondo scan fatto sulo sui vettori di code
__global__ void scan_step2(int4 * restrict code0, int4 * restrict code1, int4 * restrict code2, int4 * restrict code3, int nels /* numero di quartine */)
{
int els_per_sezione = (nels + gridDim.x - 1)/gridDim.x;
int idx = threadIdx.x + blockIdx.x*els_per_sezione;
int4 val0,val1,val2,val3;
int4 correzione_dal_blocco_precedente = make_int4(0,0,0,0);
int numero_cicli = (els_per_sezione + blockDim.x - 1)/blockDim.x;
int elemento_limite = min(els_per_sezione*(blockIdx.x+1), nels);
for (int ciclo = 0; ciclo < numero_cicli;++ciclo, idx += blockDim.x)
{
val0 = (idx < elemento_limite ? code0[idx] : make_int4(0, 0, 0, 0));
val1 = (idx < elemento_limite ? code1[idx] : make_int4(0, 0, 0, 0));
val2 = (idx < elemento_limite ? code2[idx] : make_int4(0, 0, 0, 0));
val3 = (idx < elemento_limite ? code3[idx] : make_int4(0, 0, 0, 0));
/* scan delle componenti di val */
val0.y += val0.x;
val0.z += val0.y;
val0.w += val0.z;
val1.y += val1.x;
val1.z += val1.y;
val1.w += val1.z;
val2.y += val2.x;
val2.z += val2.y;
val2.w += val2.z;
val3.y += val3.x;
val3.z += val3.y;
val3.w += val3.z;
//da modificare anche scan_delle_code
int4 coda=make_int4(val0.w,val1.w,val2.w,val3.w);
scan_delle_code(coda);
int4 correzione_dai_thread_precedenti = make_int4(0,0,0,0);
if (threadIdx.x > 0)
correzione_dai_thread_precedenti =
shared[threadIdx.x-1];
int correzione_totale = correzione_dal_blocco_precedente.x + correzione_dai_thread_precedenti.x;
val0.x += correzione_totale;
val0.y += correzione_totale;
val0.z += correzione_totale;
val0.w += correzione_totale;
correzione_totale = correzione_dal_blocco_precedente.y + correzione_dai_thread_precedenti.y;
val1.x += correzione_totale;
val1.y += correzione_totale;
val1.z += correzione_totale;
val1.w += correzione_totale;
correzione_totale = correzione_dal_blocco_precedente.z + correzione_dai_thread_precedenti.z;
val2.x += correzione_totale;
val2.y += correzione_totale;
val2.z += correzione_totale;
val2.w += correzione_totale;
correzione_totale = correzione_dal_blocco_precedente.w + correzione_dai_thread_precedenti.w;
val3.x += correzione_totale;
val3.y += correzione_totale;
val3.z += correzione_totale;
val3.w += correzione_totale;
correzione_dal_blocco_precedente.x += shared[blockDim.x-1].x; //correzione di 00 (0)
correzione_dal_blocco_precedente.y += shared[blockDim.x-1].y; //correzione di 01 (1)
correzione_dal_blocco_precedente.z += shared[blockDim.x-1].z; //correzione di 10 (2)
correzione_dal_blocco_precedente.w += shared[blockDim.x-1].w; //correzione di 11 (3)
if (idx < nels)
{
code0[idx] = val0;
code1[idx] = val1;
code2[idx] = val2;
code3[idx] = val3;
}
}
}
__global__ void fixup(int4 * restrict scan0,int4 * restrict scan1,int4 * restrict scan2,int4 * restrict scan3,int nels ,const int * restrict code0,const int * restrict code1,const int * restrict code2,const int * restrict code3,const int4* restrict in,int nbit,int4* max)
{
int els_per_sezione = (nels + gridDim.x - 1)/gridDim.x;
int idx = threadIdx.x + blockIdx.x*els_per_sezione;
int4 correzione_dal_blocco_precedente =make_int4(0,0,0,0);
if(blockIdx.x>0)
{
correzione_dal_blocco_precedente.x = code0[blockIdx.x - 1];
correzione_dal_blocco_precedente.y = code1[blockIdx.x - 1];
correzione_dal_blocco_precedente.z = code2[blockIdx.x - 1];
correzione_dal_blocco_precedente.w = code3[blockIdx.x - 1];
}
int numero_cicli = (els_per_sezione + blockDim.x - 1)/blockDim.x;
int elemento_limite = min(els_per_sezione*(blockIdx.x+1), nels);
for (int ciclo = 0; ciclo < numero_cicli;++ciclo, idx += blockDim.x) {
if (idx < elemento_limite)
{
int4 val0 = scan0[idx], val1=scan1[idx], val2=scan2[idx], val3=scan3[idx];
int4 val_in=in[idx];
if(idx==nels-1)
{ //salvataggio in memoria globale degli ultimi elementi degli scan inclusivi
(*max).x=val0.w+correzione_dal_blocco_precedente.x;
(*max).y=val1.w+correzione_dal_blocco_precedente.y;
(*max).z=val2.w+correzione_dal_blocco_precedente.z;
(*max).w=val3.w+correzione_dal_blocco_precedente.w;
}
//trasformazione degli scan da inclusivi ad esclusivi
val0.x += correzione_dal_blocco_precedente.x - ((((val_in.x>>nbit)&3)==0)?1:0);
val0.y += correzione_dal_blocco_precedente.x - ((((val_in.y>>nbit)&3)==0)?1:0);
val0.z += correzione_dal_blocco_precedente.x - ((((val_in.z>>nbit)&3)==0)?1:0);
val0.w += correzione_dal_blocco_precedente.x - ((((val_in.w>>nbit)&3)==0)?1:0);
val1.x += correzione_dal_blocco_precedente.y - ((((val_in.x>>nbit)&3)==1)?1:0);
val1.y += correzione_dal_blocco_precedente.y - ((((val_in.y>>nbit)&3)==1)?1:0);
val1.z += correzione_dal_blocco_precedente.y - ((((val_in.z>>nbit)&3)==1)?1:0);
val1.w += correzione_dal_blocco_precedente.y - ((((val_in.w>>nbit)&3)==1)?1:0);
val2.x += correzione_dal_blocco_precedente.z - ((((val_in.x>>nbit)&3)==2)?1:0);
val2.y += correzione_dal_blocco_precedente.z - ((((val_in.y>>nbit)&3)==2)?1:0);
val2.z += correzione_dal_blocco_precedente.z - ((((val_in.z>>nbit)&3)==2)?1:0);
val2.w += correzione_dal_blocco_precedente.z - ((((val_in.w>>nbit)&3)==2)?1:0);
val3.x += correzione_dal_blocco_precedente.w - ((((val_in.x>>nbit)&3)==3)?1:0);
val3.y += correzione_dal_blocco_precedente.w - ((((val_in.y>>nbit)&3)==3)?1:0);
val3.z += correzione_dal_blocco_precedente.w - ((((val_in.z>>nbit)&3)==3)?1:0);
val3.w += correzione_dal_blocco_precedente.w - ((((val_in.w>>nbit)&3)==3)?1:0);
scan0[idx] = val0;
scan1[idx] = val1;
scan2[idx] = val2;
scan3[idx] = val3;
}
}
}
//kernel adibito al riordino dei vettori utilizzando 2 bit
__global__ void reorder(const int4* restrict scan0,const int4* restrict scan1,const int4* restrict scan2,const int4* restrict scan3,const int4* restrict in, int* restrict out,int nels,int4* max)
{
int els_per_sezione = (nels + gridDim.x - 1)/gridDim.x;
int idx = threadIdx.x + blockIdx.x*els_per_sezione;
int numero_cicli = (els_per_sezione +blockDim.x -1)/blockDim.x;
int elemento_limite = min(els_per_sezione*(blockIdx.x+1), nels);
int4 offset_max=make_int4((*max).x,(*max).y,(*max).z,(*max).w);
int4 val_scan_succ;
//scan dei max
int4 max_scan=make_int4(offset_max.x,offset_max.y,offset_max.z,offset_max.w);
offset_max.x=0; //offset di 00 (0)
offset_max.y=max_scan.x; //offset di 01 (1)
offset_max.z=offset_max.y + max_scan.y; //offset di 10 (2)
offset_max.w=offset_max.z + max_scan.z; //offset di 11 (3)
//inizio ciclo di reorder
for (int ciclo = 0; ciclo < numero_cicli;++ciclo, idx += blockDim.x)
{
if (idx < elemento_limite)
{
int4 val_num=in[idx], val_scan0=scan0[idx], val_scan1=scan1[idx], val_scan2=scan2[idx], val_scan3=scan3[idx];
// confronto 1° elemento con 2° elemento della quartina
if(val_scan0.x!=val_scan0.y)
out[val_scan0.x+offset_max.x]=val_num.x;
else if(val_scan1.x!=val_scan1.y)
out[val_scan1.x+offset_max.y]=val_num.x;
else if(val_scan2.x!=val_scan2.y)
out[val_scan2.x+offset_max.z]=val_num.x;
else
out[val_scan3.x+offset_max.w]=val_num.x;
// confronto 2° elemento con 3° elemento della quartina
if(val_scan0.y!=val_scan0.z)
out[val_scan0.y+offset_max.x]=val_num.y;
else if(val_scan1.y!=val_scan1.z)
out[val_scan1.y+offset_max.y]=val_num.y;
else if(val_scan2.y!=val_scan2.z)
out[val_scan2.y+offset_max.z]=val_num.y;
else
out[val_scan3.y+offset_max.w]=val_num.y;
// confronto 3° elemento con 4° elemento della quartina
if(val_scan0.z!=val_scan0.w)
out[val_scan0.z+offset_max.x]=val_num.z;
else if(val_scan1.z!=val_scan1.w)
out[val_scan1.z+offset_max.y]=val_num.z;
else if(val_scan2.z!=val_scan2.w)
out[val_scan2.z+offset_max.z]=val_num.z;
else
out[val_scan3.z+offset_max.w]=val_num.z;
//confronto 4° elemento con 1° elemento della quartina dello scan successivo
if(idx!=nels-1)
{
//scan3[idx+1] puo non essere preso, visto che non viene mai usato
val_scan_succ=make_int4(scan0[idx+1].x,scan1[idx+1].x,scan2[idx+1].x,0); // primi valori delle quartine successive degli scan
if(val_scan0.w!=val_scan_succ.x)
out[val_scan0.w + offset_max.x]=val_num.w;
else if(val_scan1.w!=val_scan_succ.y)
out[val_scan1.w + offset_max.y]=val_num.w;
else if(val_scan2.w!=val_scan_succ.z)
out[val_scan2.w + offset_max.z]=val_num.w;
else
out[val_scan3.w + offset_max.w]=val_num.w;
}
else
{
if(val_scan0.w!=max_scan.x)
out[val_scan0.w + offset_max.x]=val_num.w;
else if(val_scan1.w!=max_scan.y)
out[val_scan1.w + offset_max.y]=val_num.w;
else if(val_scan2.w!=max_scan.z)
out[val_scan2.w + offset_max.z]=val_num.w;
else
out[val_scan3.w + offset_max.w]=val_num.w;
}
}
}
}
int main(int argc, char *argv[])
{
if (argc < 4)
error("sintassi radix_sort: numels thread_per_blocco numero_blocchi_scan valore_massimo");
int nels = atoi(argv[1]); /* numero di elementi */
if (nels <= 0)
error("il numero di elementi deve essere positivo");
if (nels & 3)
error("il numero di elementi deve essere multiplo di 4");
int numThreads = atoi(argv[2]); /* local work size */
if (numThreads <= 0)
error("il numero di thread per blocco deve essere positivo");
int numBlocksScan = atoi(argv[3]); /* numero blocchi scan */
if (numBlocksScan <= 0)
error("il numero di blocchi deve essere positivo");
int numMax = atoi(argv[4]); /* numero blocchi scan */
if (numMax <= 0)
error("il valore massimo deve essere positivo");
//inizializzazione dei vettori
const size_t memsize = sizeof(int)*nels;
int4 *d_v1, *d_scan0,*d_scan1,*d_scan2,*d_scan3, *d_code0,*d_code1,*d_code2,*d_code3,*d_out,*tmp;
int numbit;
int4 *d_max;
//calolo dei cicli da fare avendo il massimo
int cicli=int(log(numMax)/log(2)) + 1;
printf("numero cicli da fare=%d\n",cicli/2);
//allocazione dei vettori su GPU
hipError_t err = hipMalloc(&d_v1, memsize);
cuda_check(err, "malloc v1");
err= hipMalloc(&d_max,sizeof(int4));
cuda_check(err,"malloc max");
err = hipMalloc(&d_scan0, memsize);
cuda_check(err, "malloc scan0");
err = hipMalloc(&d_scan1, memsize);
cuda_check(err, "malloc scan1");
err = hipMalloc(&d_scan2, memsize);
cuda_check(err, "malloc scan2");
err = hipMalloc(&d_scan3, memsize);
cuda_check(err, "malloc scan3");
err = hipMalloc(&d_code0, numBlocksScan*sizeof(int));
cuda_check(err, "malloc code0");
err = hipMalloc(&d_code1, numBlocksScan*sizeof(int));
cuda_check(err, "malloc code1");
err = hipMalloc(&d_code2, numBlocksScan*sizeof(int));
cuda_check(err, "malloc code2");
err = hipMalloc(&d_code3, numBlocksScan*sizeof(int));
cuda_check(err, "malloc code3");
err = hipMalloc(&d_out, memsize);
cuda_check(err, "malloc out");
//allocazione su CPU
int *vout = (int*)malloc(memsize);
if (!vout)
error("alloc vscan");
//inizializzazione su CPU di numeri random con massimo possibile numMax
init_random(vout,nels,numMax);
/*
//inizializzazione su GPU di numeri decrescenti a partire da nels
int numBlocks = (nels + numThreads - 1)/numThreads;
init<<<numBlocks, numThreads>>>((int*)d_v1, nels);
*/
//prova ad otimizzare la cache
hipFuncSetCacheConfig(reinterpret_cast<const void*>(scan_step1), hipFuncCachePreferL1);
hipFuncSetCacheConfig(reinterpret_cast<const void*>(scan_step2), hipFuncCachePreferL1);
hipFuncSetCacheConfig(reinterpret_cast<const void*>(fixup), hipFuncCachePreferL1);
hipFuncSetCacheConfig(reinterpret_cast<const void*>(reorder), hipFuncCachePreferL1);
err = hipMemcpy(d_v1,vout,memsize, hipMemcpyHostToDevice);
cuda_check(err, "memcpy vett su GPU");
//creazione eventi
hipEvent_t before_scan, after_scan;
err = hipEventCreate(&before_scan);
cuda_check(err, "create event before");
err = hipEventCreate(&after_scan);
cuda_check(err, "create event after");
hipEventRecord(before_scan);
for(numbit=0;numbit<cicli;numbit+=2)
{
//pulizia dei vettori di code
err= hipMemset (d_code0,0, numBlocksScan*sizeof(int));
cuda_check(err, "memset0");
err= hipMemset (d_code1,0, numBlocksScan*sizeof(int));
cuda_check(err, "memset1");
err= hipMemset (d_code2,0, numBlocksScan*sizeof(int));
cuda_check(err, "memset2");
err= hipMemset (d_code3,0, numBlocksScan*sizeof(int));
cuda_check(err, "memset3");
scan_step1<<<numBlocksScan, numThreads, numThreads*sizeof(int)*4>>>
(d_scan0,d_scan1,d_scan2,d_scan3, d_v1, nels/4, (int*)d_code0,(int*)d_code1,(int*)d_code2,(int*)d_code3, numbit);
scan_step2<<<1, numThreads, numThreads*sizeof(int)*4>>>
(d_code0, d_code1,d_code2,d_code3, numBlocksScan/4);
fixup<<<numBlocksScan, numThreads>>>(d_scan0,d_scan1,d_scan2,d_scan3, nels/4, (int*)d_code0,(int*)d_code1,(int*)d_code2,(int*)d_code3,d_v1,numbit,d_max);
reorder<<<numBlocksScan,numThreads>>>(d_scan0,d_scan1,d_scan2,d_scan3,d_v1,(int*)d_out,nels/4,d_max);
//scambio deii puntatori d_out e d_v1
if(numbit+2 <cicli)
{
tmp=d_v1;
d_v1=d_out;
d_out=tmp;
}
}
hipEventRecord(after_scan);
err = hipEventSynchronize(after_scan);
cuda_check(err, "after scan sznc");
float runtime_ms;
hipEventElapsedTime(&runtime_ms, before_scan, after_scan);
printf("scan runtime: %.4g ms\n", runtime_ms);
err = hipMemcpy(vout, d_out, memsize, hipMemcpyDeviceToHost);
cuda_check(err, "memcpy");
printf("\n\n");
verify_random(vout,nels)?printf("Ordinamento riuscito!\n"):printf("Ordinamento non riuscito!\n");
if(nels <=32)
for(int i=0;i<nels;++i)
printf("%d ",vout[i]);
printf("\n");
} |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
__global__ void saxpy(uint n, float a, float *x, float *y) {
uint i = blockIdx.x*blockDim.x + threadIdx.x; // nvcc built-ins
if(i < n)
y[i] = a*x[i] + y[i];
}
void misc(void) {
int ndev;
cudaDeviceProp prop;
cudaGetDeviceCount(&ndev);
printf("This machine has %d CUDA devices.\n", ndev);
for(int i = 0; i < ndev; i++) {
const char *indent = (ndev == 0) ? "" : " ";
cudaGetDeviceProperties(&prop, i);
if(ndev > 0)
printf("Device %d:\n", i);
printf("%sdevice.name = %s\n", indent, prop.name);
printf("%sdevice.maxThreadsPerBlock = %d\n", indent, prop.maxThreadsPerBlock);
}
}
int main(int argc, char **argv) {
uint n = atoi(argv[1]);
int size = n*sizeof(float);
float *x, *y, *yy;
float *dev_x, *dev_y;
misc();
x = (float *)malloc(size);
y = (float *)malloc(size);
yy = (float *)malloc(size);
for(int i = 0; i < n; i++) {
x[i] = i;
y[i] = i*i;
}
cudaMalloc((void**)(&dev_x), size);
cudaMalloc((void**)(&dev_y), size);
cudaMemcpy(dev_x, x, size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_y, y, size, cudaMemcpyHostToDevice);
float a = 3.0;
saxpy<<<ceil(n/256.0),256>>>(n, a, dev_x, dev_y);
cudaMemcpy(yy, dev_y, size, cudaMemcpyDeviceToHost);
for(int i = 0; i < n; i++) { // check the result
if(yy[i] != a*x[i] + y[i]) {
fprintf(stderr, "ERROR: i=%d, a = %s, x[i]=%f, y[i]=%f, yy[i]=%f\n",
i, a, x[i], y[i], yy[i]);
exit(-1);
}
}
printf("The results match!\n");
free(x);
free(y);
free(yy);
cudaFree(dev_x);
cudaFree(dev_y);
exit(0);
} | code for sm_80
Function : _Z5saxpyjfPfS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.U32.AND P0, PT, R4, c[0x0][0x160], PT ; /* 0x0000580004007a0c */
/* 0x000fda0003f06070 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE.U32 R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x000fc800078e0005 */
/*0090*/ IMAD.WIDE.U32 R4, R4, R5, c[0x0][0x170] ; /* 0x00005c0004047625 */
/* 0x000fe400078e0005 */
/*00a0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R7, [R4.64] ; /* 0x0000000404077981 */
/* 0x000ea4000c1e1900 */
/*00c0*/ FFMA R7, R2, c[0x0][0x164], R7 ; /* 0x0000590002077a23 */
/* 0x004fca0000000007 */
/*00d0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*00e0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00f0*/ BRA 0xf0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
__global__ void saxpy(uint n, float a, float *x, float *y) {
uint i = blockIdx.x*blockDim.x + threadIdx.x; // nvcc built-ins
if(i < n)
y[i] = a*x[i] + y[i];
}
void misc(void) {
int ndev;
cudaDeviceProp prop;
cudaGetDeviceCount(&ndev);
printf("This machine has %d CUDA devices.\n", ndev);
for(int i = 0; i < ndev; i++) {
const char *indent = (ndev == 0) ? "" : " ";
cudaGetDeviceProperties(&prop, i);
if(ndev > 0)
printf("Device %d:\n", i);
printf("%sdevice.name = %s\n", indent, prop.name);
printf("%sdevice.maxThreadsPerBlock = %d\n", indent, prop.maxThreadsPerBlock);
}
}
int main(int argc, char **argv) {
uint n = atoi(argv[1]);
int size = n*sizeof(float);
float *x, *y, *yy;
float *dev_x, *dev_y;
misc();
x = (float *)malloc(size);
y = (float *)malloc(size);
yy = (float *)malloc(size);
for(int i = 0; i < n; i++) {
x[i] = i;
y[i] = i*i;
}
cudaMalloc((void**)(&dev_x), size);
cudaMalloc((void**)(&dev_y), size);
cudaMemcpy(dev_x, x, size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_y, y, size, cudaMemcpyHostToDevice);
float a = 3.0;
saxpy<<<ceil(n/256.0),256>>>(n, a, dev_x, dev_y);
cudaMemcpy(yy, dev_y, size, cudaMemcpyDeviceToHost);
for(int i = 0; i < n; i++) { // check the result
if(yy[i] != a*x[i] + y[i]) {
fprintf(stderr, "ERROR: i=%d, a = %s, x[i]=%f, y[i]=%f, yy[i]=%f\n",
i, a, x[i], y[i], yy[i]);
exit(-1);
}
}
printf("The results match!\n");
free(x);
free(y);
free(yy);
cudaFree(dev_x);
cudaFree(dev_y);
exit(0);
} | .file "tmpxft_00020df7_00000000-6_saxpy.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string ""
.LC1:
.string " "
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "This machine has %d CUDA devices.\n"
.section .rodata.str1.1
.LC3:
.string "Device %d:\n"
.LC4:
.string "%sdevice.name = %s\n"
.section .rodata.str1.8
.align 8
.LC5:
.string "%sdevice.maxThreadsPerBlock = %d\n"
.text
.globl _Z4miscv
.type _Z4miscv, @function
_Z4miscv:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $1064, %rsp
.cfi_def_cfa_offset 1120
movq %fs:40, %rax
movq %rax, 1048(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rdi
call cudaGetDeviceCount@PLT
movl 12(%rsp), %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %ebp
leaq .LC1(%rip), %rbx
leaq 16(%rsp), %r12
leaq .LC3(%rip), %r15
leaq .LC4(%rip), %r14
leaq .LC5(%rip), %r13
cmpl $0, 12(%rsp)
jg .L4
.L3:
movq 1048(%rsp), %rax
subq %fs:40, %rax
jne .L14
addq $1064, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
movq %r12, %rcx
movq %rbx, %rdx
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 336(%rsp), %ecx
movq %rbx, %rdx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %ebp
movl 12(%rsp), %eax
cmpl %ebp, %eax
jle .L3
testl %eax, %eax
leaq .LC1(%rip), %rbx
leaq .LC0(%rip), %rax
cmove %rax, %rbx
.L4:
movl %ebp, %esi
movq %r12, %rdi
call cudaGetDeviceProperties_v2@PLT
cmpl $0, 12(%rsp)
jle .L7
movl %ebp, %edx
movq %r15, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L7
.L14:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z4miscv, .-_Z4miscv
.globl _Z28__device_stub__Z5saxpyjfPfS_jfPfS_
.type _Z28__device_stub__Z5saxpyjfPfS_jfPfS_, @function
_Z28__device_stub__Z5saxpyjfPfS_jfPfS_:
.LFB2083:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movss %xmm0, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L19
.L15:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L20
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z5saxpyjfPfS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L15
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z28__device_stub__Z5saxpyjfPfS_jfPfS_, .-_Z28__device_stub__Z5saxpyjfPfS_jfPfS_
.globl _Z5saxpyjfPfS_
.type _Z5saxpyjfPfS_, @function
_Z5saxpyjfPfS_:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z28__device_stub__Z5saxpyjfPfS_jfPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z5saxpyjfPfS_, .-_Z5saxpyjfPfS_
.section .rodata.str1.8
.align 8
.LC12:
.string "ERROR: i=%d, a = %s, x[i]=%f, y[i]=%f, yy[i]=%f\n"
.section .rodata.str1.1
.LC13:
.string "The results match!\n"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $56, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r12
movl %eax, %r15d
call _Z4miscv
leal 0(,%r12,4), %r14d
movslq %r14d, %r14
movq %r14, %rdi
call malloc@PLT
movq %rax, %rbp
movq %r14, %rdi
call malloc@PLT
movq %rax, %rbx
movq %r14, %rdi
call malloc@PLT
movq %rax, %r13
testl %r12d, %r12d
je .L24
movl %r12d, %ecx
movl $0, %eax
.L25:
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, 0(%rbp,%rax,4)
movl %eax, %edx
imull %eax, %edx
pxor %xmm0, %xmm0
cvtsi2ssl %edx, %xmm0
movss %xmm0, (%rbx,%rax,4)
addq $1, %rax
cmpq %rcx, %rax
jne .L25
.L24:
movq %rsp, %rdi
movq %r14, %rsi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %r14, %rdx
movq %rbp, %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %r14, %rdx
movq %rbx, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $256, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl %r12d, %eax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
mulsd .LC6(%rip), %xmm0
movapd %xmm0, %xmm3
movsd .LC14(%rip), %xmm2
movapd %xmm0, %xmm1
andpd %xmm2, %xmm1
movsd .LC7(%rip), %xmm4
ucomisd %xmm1, %xmm4
jbe .L28
cvttsd2siq %xmm0, %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
cmpnlesd %xmm1, %xmm3
movsd .LC9(%rip), %xmm4
andpd %xmm4, %xmm3
addsd %xmm1, %xmm3
andnpd %xmm0, %xmm2
orpd %xmm2, %xmm3
.L28:
cvttsd2siq %xmm3, %rax
movl %eax, 16(%rsp)
movl $1, 20(%rsp)
movl 36(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L39
.L29:
movl $2, %ecx
movq %r14, %rdx
movq 8(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
testl %r12d, %r12d
je .L30
movl %r12d, %r12d
movl $0, %ecx
movss .LC10(%rip), %xmm4
.L33:
movss 0(%r13,%rcx,4), %xmm3
movss 0(%rbp,%rcx,4), %xmm1
movss (%rbx,%rcx,4), %xmm2
movaps %xmm1, %xmm0
mulss %xmm4, %xmm0
addss %xmm2, %xmm0
ucomiss %xmm0, %xmm3
jp .L35
jne .L35
addq $1, %rcx
cmpq %r12, %rcx
jne .L33
.L30:
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movl $0, %edi
call exit@PLT
.L39:
movq 8(%rsp), %rdx
movq (%rsp), %rsi
movss .LC10(%rip), %xmm0
movl %r15d, %edi
call _Z28__device_stub__Z5saxpyjfPfS_jfPfS_
jmp .L29
.L35:
cvtss2sd %xmm3, %xmm3
cvtss2sd %xmm2, %xmm2
cvtss2sd %xmm1, %xmm1
movsd .LC11(%rip), %xmm0
leaq .LC12(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $4, %eax
call __fprintf_chk@PLT
movl $-1, %edi
call exit@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.1
.LC15:
.string "_Z5saxpyjfPfS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _Z5saxpyjfPfS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC6:
.long 0
.long 1064304640
.align 8
.LC7:
.long 0
.long 1127219200
.align 8
.LC9:
.long 0
.long 1072693248
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC10:
.long 1077936128
.section .rodata.cst8
.align 8
.LC11:
.long 0
.long 1074266112
.align 8
.LC14:
.long -1
.long 2147483647
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
// SAXPY kernel: y[i] = a*x[i] + y[i] for every i < n.
// Launched one thread per element (see main: <<<ceil(n/256.0),256>>>).
__global__ void saxpy(uint n, float a, float *x, float *y) {
  uint i = blockIdx.x*blockDim.x + threadIdx.x; // nvcc built-ins
  if(i < n)   // guard the grid tail when n is not a multiple of the block size
    y[i] = a*x[i] + y[i];
}
// Report the CUDA devices visible to this process: prints the device
// count, then each device's name and maxThreadsPerBlock.
void misc(void) {
  int ndev;
  cudaDeviceProp prop;
  // NOTE(review): the cudaError_t result is ignored; on failure ndev is
  // left unspecified -- consider checking it.
  cudaGetDeviceCount(&ndev);
  printf("This machine has %d CUDA devices.\n", ndev);
  for(int i = 0; i < ndev; i++) {
    // NOTE(review): ndev == 0 can never hold inside this loop (the body
    // only runs when ndev >= 1); this probably meant ndev == 1 -- confirm.
    const char *indent = (ndev == 0) ? "" : "  ";
    cudaGetDeviceProperties(&prop, i);
    // NOTE(review): ndev > 0 is likewise always true here; possibly
    // intended ndev > 1 (skip the header when only one device exists).
    if(ndev > 0)
      printf("Device %d:\n", i);
    printf("%sdevice.name = %s\n", indent, prop.name);
    printf("%sdevice.maxThreadsPerBlock = %d\n", indent, prop.maxThreadsPerBlock);
  }
}
// Driver: fill x[i]=i and y[i]=i*i on the host, run saxpy with a=3 on the
// device, copy the result back, and verify every element against a host
// recomputation.  argv[1] is the element count n; exits 0 on success.
int main(int argc, char **argv) {
  // NOTE(review): argv[1] is not checked against argc -- invoking the
  // program with no argument dereferences a null pointer.
  uint n = atoi(argv[1]);
  int size = n*sizeof(float);          // byte size of each n-float buffer
  float *x, *y, *yy;                   // host input x, host input y, device result
  float *dev_x, *dev_y;                // device-side copies
  misc();                              // print the device inventory first
  x = (float *)malloc(size);
  y = (float *)malloc(size);
  yy = (float *)malloc(size);
  for(int i = 0; i < n; i++) {
    x[i] = i;
    y[i] = i*i;                        // NOTE(review): int overflow for i >= 46341
  }
  // NOTE(review): the cuda* return codes below are all unchecked.
  cudaMalloc((void**)(&dev_x), size);
  cudaMalloc((void**)(&dev_y), size);
  cudaMemcpy(dev_x, x, size, cudaMemcpyHostToDevice);
  cudaMemcpy(dev_y, y, size, cudaMemcpyHostToDevice);
  float a = 3.0;
  // One thread per element: ceil(n/256) blocks of 256 threads each.
  saxpy<<<ceil(n/256.0),256>>>(n, a, dev_x, dev_y);
  cudaMemcpy(yy, dev_y, size, cudaMemcpyDeviceToHost);  // blocking copy; waits for the kernel
  for(int i = 0; i < n; i++) { // check the result
    if(yy[i] != a*x[i] + y[i]) {
      // NOTE(review): "%s" is paired with the float 'a' here -- undefined
      // behavior whenever this error path is taken; should be "%f".
      fprintf(stderr, "ERROR: i=%d, a = %s, x[i]=%f, y[i]=%f, yy[i]=%f\n",
	      i, a, x[i], y[i], yy[i]);
      exit(-1);
    }
  }
  printf("The results match!\n");
  free(x);
  free(y);
  free(yy);
  cudaFree(dev_x);
  cudaFree(dev_y);
  exit(0);
} | #include <hip/hip_runtime.h>
#include <stdio.h>
// Single-precision a*x plus y (SAXPY): y[i] = a*x[i] + y[i] for each i < n.
// One thread handles one element; the host launches ceil(n/256) blocks of
// 256 threads (see main), so the tail guard below is required.
__global__ void saxpy(uint n, float a, float *x, float *y) {
  const uint idx = blockDim.x * blockIdx.x + threadIdx.x;  // flat global index
  if (idx >= n)
    return;  // grid tail: n is rarely an exact multiple of the block size
  y[idx] = a * x[idx] + y[idx];
}
// Report the GPU devices visible to this process: prints the device
// count, then each device's name and maxThreadsPerBlock.
void misc(void) {
  int ndev;
  hipDeviceProp_t prop;
  // NOTE(review): the hipError_t result is ignored; on failure ndev is
  // left unspecified -- consider checking it.
  hipGetDeviceCount(&ndev);
  printf("This machine has %d CUDA devices.\n", ndev);
  for(int i = 0; i < ndev; i++) {
    // NOTE(review): ndev == 0 can never hold inside this loop (the body
    // only runs when ndev >= 1); this probably meant ndev == 1 -- confirm.
    const char *indent = (ndev == 0) ? "" : "  ";
    hipGetDeviceProperties(&prop, i);
    // NOTE(review): ndev > 0 is likewise always true here; possibly
    // intended ndev > 1 (skip the header when only one device exists).
    if(ndev > 0)
      printf("Device %d:\n", i);
    printf("%sdevice.name = %s\n", indent, prop.name);
    printf("%sdevice.maxThreadsPerBlock = %d\n", indent, prop.maxThreadsPerBlock);
  }
}
// Driver: fill x[i]=i and y[i]=i*i on the host, run saxpy with a=3 on the
// device, copy the result back, and verify every element against a host
// recomputation.
//
// argv[1] is the element count n.  Exits 0 on success, nonzero on a
// mismatch or bad usage.
int main(int argc, char **argv) {
  // Guard against a missing argument: atoi(argv[1]) would otherwise
  // dereference a null pointer.
  if (argc < 2) {
    fprintf(stderr, "usage: %s <n>\n", argv[0]);
    exit(-1);
  }
  uint n = atoi(argv[1]);
  int size = n*sizeof(float);          // byte size of each n-float buffer
  float *x, *y, *yy;                   // host input x, host input y, device result
  float *dev_x, *dev_y;                // device-side copies
  misc();                              // print the device inventory first
  x = (float *)malloc(size);
  y = (float *)malloc(size);
  yy = (float *)malloc(size);
  for(int i = 0; i < n; i++) {
    x[i] = i;
    y[i] = i*i;                        // NOTE(review): int overflow for i >= 46341
  }
  hipMalloc((void**)(&dev_x), size);
  hipMalloc((void**)(&dev_y), size);
  hipMemcpy(dev_x, x, size, hipMemcpyHostToDevice);
  hipMemcpy(dev_y, y, size, hipMemcpyHostToDevice);
  float a = 3.0f;                      // float literal: avoid a silent double round-trip
  // One thread per element: ceil(n/256) blocks of 256 threads each.
  saxpy<<<ceil(n/256.0),256>>>(n, a, dev_x, dev_y);
  hipMemcpy(yy, dev_y, size, hipMemcpyDeviceToHost);  // blocking copy; waits for the kernel
  for(int i = 0; i < n; i++) { // check the result
    if(yy[i] != a*x[i] + y[i]) {
      // BUG FIX: the format string previously used %s for the float 'a',
      // which is undefined behavior (the promoted double would be read as
      // a char pointer).  %f matches the other float arguments.
      fprintf(stderr, "ERROR: i=%d, a = %f, x[i]=%f, y[i]=%f, yy[i]=%f\n",
	      i, a, x[i], y[i], yy[i]);
      exit(-1);
    }
  }
  printf("The results match!\n");
  free(x);
  free(y);
  free(yy);
  hipFree(dev_x);
  hipFree(dev_y);
  exit(0);
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
// SAXPY kernel: y[i] = a*x[i] + y[i] for every i < n.
// Launched one thread per element (see main: <<<ceil(n/256.0),256>>>).
__global__ void saxpy(uint n, float a, float *x, float *y) {
  uint i = blockIdx.x*blockDim.x + threadIdx.x; // nvcc built-ins
  if(i < n)   // guard the grid tail when n is not a multiple of the block size
    y[i] = a*x[i] + y[i];
}
// Report the GPU devices visible to this process: prints the device
// count, then each device's name and maxThreadsPerBlock.
void misc(void) {
  int ndev;
  hipDeviceProp_t prop;
  // NOTE(review): the hipError_t result is ignored; on failure ndev is
  // left unspecified -- consider checking it.
  hipGetDeviceCount(&ndev);
  printf("This machine has %d CUDA devices.\n", ndev);
  for(int i = 0; i < ndev; i++) {
    // NOTE(review): ndev == 0 can never hold inside this loop (the body
    // only runs when ndev >= 1); this probably meant ndev == 1 -- confirm.
    const char *indent = (ndev == 0) ? "" : "  ";
    hipGetDeviceProperties(&prop, i);
    // NOTE(review): ndev > 0 is likewise always true here; possibly
    // intended ndev > 1 (skip the header when only one device exists).
    if(ndev > 0)
      printf("Device %d:\n", i);
    printf("%sdevice.name = %s\n", indent, prop.name);
    printf("%sdevice.maxThreadsPerBlock = %d\n", indent, prop.maxThreadsPerBlock);
  }
}
// Driver: fill x[i]=i and y[i]=i*i on the host, run saxpy with a=3 on the
// device, copy the result back, and verify every element against a host
// recomputation.
//
// argv[1] is the element count n.  Exits 0 on success, nonzero on a
// mismatch or bad usage.
int main(int argc, char **argv) {
  // Guard against a missing argument: atoi(argv[1]) would otherwise
  // dereference a null pointer.
  if (argc < 2) {
    fprintf(stderr, "usage: %s <n>\n", argv[0]);
    exit(-1);
  }
  uint n = atoi(argv[1]);
  int size = n*sizeof(float);          // byte size of each n-float buffer
  float *x, *y, *yy;                   // host input x, host input y, device result
  float *dev_x, *dev_y;                // device-side copies
  misc();                              // print the device inventory first
  x = (float *)malloc(size);
  y = (float *)malloc(size);
  yy = (float *)malloc(size);
  for(int i = 0; i < n; i++) {
    x[i] = i;
    y[i] = i*i;                        // NOTE(review): int overflow for i >= 46341
  }
  hipMalloc((void**)(&dev_x), size);
  hipMalloc((void**)(&dev_y), size);
  hipMemcpy(dev_x, x, size, hipMemcpyHostToDevice);
  hipMemcpy(dev_y, y, size, hipMemcpyHostToDevice);
  float a = 3.0f;                      // float literal: avoid a silent double round-trip
  // One thread per element: ceil(n/256) blocks of 256 threads each.
  saxpy<<<ceil(n/256.0),256>>>(n, a, dev_x, dev_y);
  hipMemcpy(yy, dev_y, size, hipMemcpyDeviceToHost);  // blocking copy; waits for the kernel
  for(int i = 0; i < n; i++) { // check the result
    if(yy[i] != a*x[i] + y[i]) {
      // BUG FIX: the format string previously used %s for the float 'a',
      // which is undefined behavior (the promoted double would be read as
      // a char pointer).  %f matches the other float arguments.
      fprintf(stderr, "ERROR: i=%d, a = %f, x[i]=%f, y[i]=%f, yy[i]=%f\n",
	      i, a, x[i], y[i], yy[i]);
      exit(-1);
    }
  }
  printf("The results match!\n");
  free(x);
  free(y);
  free(yy);
  hipFree(dev_x);
  hipFree(dev_y);
  exit(0);
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z5saxpyjfPfS_
.globl _Z5saxpyjfPfS_
.p2align 8
.type _Z5saxpyjfPfS_,@function
_Z5saxpyjfPfS_:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_u32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x8
v_mov_b32_e32 v2, 0
s_load_b32 s0, s[0:1], 0x4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[0:1], off
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v3, s0, v2
global_store_b32 v[0:1], v3, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z5saxpyjfPfS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z5saxpyjfPfS_, .Lfunc_end0-_Z5saxpyjfPfS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z5saxpyjfPfS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z5saxpyjfPfS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void saxpy(uint n, float a, float *x, float *y) {
uint i = blockIdx.x*blockDim.x + threadIdx.x; // nvcc built-ins
if(i < n)
y[i] = a*x[i] + y[i];
}
void misc(void) {
int ndev;
hipDeviceProp_t prop;
hipGetDeviceCount(&ndev);
printf("This machine has %d CUDA devices.\n", ndev);
for(int i = 0; i < ndev; i++) {
const char *indent = (ndev == 0) ? "" : " ";
hipGetDeviceProperties(&prop, i);
if(ndev > 0)
printf("Device %d:\n", i);
printf("%sdevice.name = %s\n", indent, prop.name);
printf("%sdevice.maxThreadsPerBlock = %d\n", indent, prop.maxThreadsPerBlock);
}
}
int main(int argc, char **argv) {
uint n = atoi(argv[1]);
int size = n*sizeof(float);
float *x, *y, *yy;
float *dev_x, *dev_y;
misc();
x = (float *)malloc(size);
y = (float *)malloc(size);
yy = (float *)malloc(size);
for(int i = 0; i < n; i++) {
x[i] = i;
y[i] = i*i;
}
hipMalloc((void**)(&dev_x), size);
hipMalloc((void**)(&dev_y), size);
hipMemcpy(dev_x, x, size, hipMemcpyHostToDevice);
hipMemcpy(dev_y, y, size, hipMemcpyHostToDevice);
float a = 3.0;
saxpy<<<ceil(n/256.0),256>>>(n, a, dev_x, dev_y);
hipMemcpy(yy, dev_y, size, hipMemcpyDeviceToHost);
for(int i = 0; i < n; i++) { // check the result
if(yy[i] != a*x[i] + y[i]) {
fprintf(stderr, "ERROR: i=%d, a = %s, x[i]=%f, y[i]=%f, yy[i]=%f\n",
i, a, x[i], y[i], yy[i]);
exit(-1);
}
}
printf("The results match!\n");
free(x);
free(y);
free(yy);
hipFree(dev_x);
hipFree(dev_y);
exit(0);
} | .text
.file "saxpy.hip"
.globl _Z20__device_stub__saxpyjfPfS_ # -- Begin function _Z20__device_stub__saxpyjfPfS_
.p2align 4, 0x90
.type _Z20__device_stub__saxpyjfPfS_,@function
_Z20__device_stub__saxpyjfPfS_: # @_Z20__device_stub__saxpyjfPfS_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movss %xmm0, 8(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z5saxpyjfPfS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z20__device_stub__saxpyjfPfS_, .Lfunc_end0-_Z20__device_stub__saxpyjfPfS_
.cfi_endproc
# -- End function
.globl _Z4miscv # -- Begin function _Z4miscv
.p2align 4, 0x90
.type _Z4miscv,@function
_Z4miscv: # @_Z4miscv
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $1488, %rsp # imm = 0x5D0
.cfi_def_cfa_offset 1520
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %rbp, -16
leaq 12(%rsp), %rdi
callq hipGetDeviceCount
movl 12(%rsp), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
cmpl $0, 12(%rsp)
jle .LBB1_7
# %bb.1: # %.lr.ph
movl $1, %eax
leaq 16(%rsp), %rbx
xorl %ebp, %ebp
jmp .LBB1_2
.p2align 4, 0x90
.LBB1_6: # in Loop: Header=BB1_2 Depth=1
movl $.L.str.4, %edi
movq %r14, %rsi
movq %rbx, %rdx
xorl %eax, %eax
callq printf
movl 336(%rsp), %edx
movl $.L.str.5, %edi
movq %r14, %rsi
xorl %eax, %eax
callq printf
incl %ebp
movl 12(%rsp), %eax
cmpl %eax, %ebp
jge .LBB1_7
.LBB1_2: # =>This Inner Loop Header: Depth=1
movl $.L.str.1, %r14d
testl %eax, %eax
je .LBB1_4
# %bb.3: # in Loop: Header=BB1_2 Depth=1
movl $.L.str.2, %r14d
.LBB1_4: # in Loop: Header=BB1_2 Depth=1
movq %rbx, %rdi
movl %ebp, %esi
callq hipGetDevicePropertiesR0600
cmpl $0, 12(%rsp)
jle .LBB1_6
# %bb.5: # in Loop: Header=BB1_2 Depth=1
movl $.L.str.3, %edi
movl %ebp, %esi
xorl %eax, %eax
callq printf
jmp .LBB1_6
.LBB1_7: # %._crit_edge
addq $1488, %rsp # imm = 0x5D0
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z4miscv, .Lfunc_end1-_Z4miscv
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI2_0:
.quad 0x3f70000000000000 # double 0.00390625
.LCPI2_2:
.quad 0x4008000000000000 # double 3
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0
.LCPI2_1:
.long 0x40400000 # float 3
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $128, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq 8(%rsi), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r12
leal (,%rax,4), %ebx
callq _Z4miscv
movslq %ebx, %r13
movq %r13, %rdi
callq malloc
movq %rax, %rbx
movq %r13, %rdi
callq malloc
movq %rax, %r14
movq %r13, %rdi
callq malloc
movq %rax, %r15
testl %r12d, %r12d
je .LBB2_3
# %bb.1: # %.lr.ph.preheader
movl %r12d, %eax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB2_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2ss %ecx, %xmm0
movl %ecx, %edx
imull %ecx, %edx
xorps %xmm1, %xmm1
cvtsi2ss %edx, %xmm1
movss %xmm0, (%rbx,%rcx,4)
movss %xmm1, (%r14,%rcx,4)
incq %rcx
cmpq %rcx, %rax
jne .LBB2_2
.LBB2_3: # %._crit_edge
leaq 16(%rsp), %rdi
movq %r13, %rsi
callq hipMalloc
leaq 8(%rsp), %rdi
movq %r13, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movq %rbx, %rsi
movq %r13, %rdx
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movq %r14, %rsi
movq %r13, %rdx
movl $1, %ecx
callq hipMemcpy
movl %r12d, %eax
xorps %xmm0, %xmm0
cvtsi2sd %rax, %xmm0
mulsd .LCPI2_0(%rip), %xmm0
callq ceil@PLT
cvttsd2si %xmm0, %rax
movl %eax, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $256, %rdx # imm = 0x100
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_5
# %bb.4:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movl %r12d, 28(%rsp)
movl $1077936128, 24(%rsp) # imm = 0x40400000
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 88(%rsp), %rax
movq %rax, 112(%rsp)
leaq 80(%rsp), %rax
movq %rax, 120(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z5saxpyjfPfS_, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_5:
movq 8(%rsp), %rsi
movq %r15, %rdi
movq %r13, %rdx
movl $2, %ecx
callq hipMemcpy
testl %r12d, %r12d
je .LBB2_9
# %bb.6: # %.lr.ph60.preheader
movl %r12d, %eax
xorl %edx, %edx
movss .LCPI2_1(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
.p2align 4, 0x90
.LBB2_7: # %.lr.ph60
# =>This Inner Loop Header: Depth=1
movss (%r15,%rdx,4), %xmm3 # xmm3 = mem[0],zero,zero,zero
movss (%rbx,%rdx,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
movaps %xmm1, %xmm4
mulss %xmm0, %xmm4
movss (%r14,%rdx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
addss %xmm2, %xmm4
ucomiss %xmm4, %xmm3
jne .LBB2_10
jp .LBB2_10
# %bb.8: # in Loop: Header=BB2_7 Depth=1
incq %rdx
cmpq %rdx, %rax
jne .LBB2_7
.LBB2_9: # %._crit_edge61
movl $.Lstr, %edi
callq puts@PLT
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
xorl %edi, %edi
callq exit
.LBB2_10:
movq stderr(%rip), %rdi
cvtss2sd %xmm1, %xmm1
cvtss2sd %xmm2, %xmm2
cvtss2sd %xmm3, %xmm3
movsd .LCPI2_2(%rip), %xmm0 # xmm0 = mem[0],zero
movl $.L.str.6, %esi
# kill: def $edx killed $edx killed $rdx
movb $4, %al
callq fprintf
movl $-1, %edi
callq exit
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z5saxpyjfPfS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z5saxpyjfPfS_,@object # @_Z5saxpyjfPfS_
.section .rodata,"a",@progbits
.globl _Z5saxpyjfPfS_
.p2align 3, 0x0
_Z5saxpyjfPfS_:
.quad _Z20__device_stub__saxpyjfPfS_
.size _Z5saxpyjfPfS_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "This machine has %d CUDA devices.\n"
.size .L.str, 35
.type .L.str.1,@object # @.str.1
.L.str.1:
.zero 1
.size .L.str.1, 1
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz " "
.size .L.str.2, 3
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Device %d:\n"
.size .L.str.3, 12
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "%sdevice.name = %s\n"
.size .L.str.4, 20
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "%sdevice.maxThreadsPerBlock = %d\n"
.size .L.str.5, 34
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "ERROR: i=%d, a = %s, x[i]=%f, y[i]=%f, yy[i]=%f\n"
.size .L.str.6, 49
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z5saxpyjfPfS_"
.size .L__unnamed_1, 15
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "The results match!"
.size .Lstr, 19
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z20__device_stub__saxpyjfPfS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z5saxpyjfPfS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z5saxpyjfPfS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.U32.AND P0, PT, R4, c[0x0][0x160], PT ; /* 0x0000580004007a0c */
/* 0x000fda0003f06070 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE.U32 R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x000fc800078e0005 */
/*0090*/ IMAD.WIDE.U32 R4, R4, R5, c[0x0][0x170] ; /* 0x00005c0004047625 */
/* 0x000fe400078e0005 */
/*00a0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R7, [R4.64] ; /* 0x0000000404077981 */
/* 0x000ea4000c1e1900 */
/*00c0*/ FFMA R7, R2, c[0x0][0x164], R7 ; /* 0x0000590002077a23 */
/* 0x004fca0000000007 */
/*00d0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*00e0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00f0*/ BRA 0xf0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z5saxpyjfPfS_
.globl _Z5saxpyjfPfS_
.p2align 8
.type _Z5saxpyjfPfS_,@function
_Z5saxpyjfPfS_:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_u32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x8
v_mov_b32_e32 v2, 0
s_load_b32 s0, s[0:1], 0x4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[0:1], off
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v3, s0, v2
global_store_b32 v[0:1], v3, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z5saxpyjfPfS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z5saxpyjfPfS_, .Lfunc_end0-_Z5saxpyjfPfS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z5saxpyjfPfS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z5saxpyjfPfS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00020df7_00000000-6_saxpy.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string ""
.LC1:
.string " "
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "This machine has %d CUDA devices.\n"
.section .rodata.str1.1
.LC3:
.string "Device %d:\n"
.LC4:
.string "%sdevice.name = %s\n"
.section .rodata.str1.8
.align 8
.LC5:
.string "%sdevice.maxThreadsPerBlock = %d\n"
.text
.globl _Z4miscv
.type _Z4miscv, @function
_Z4miscv:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $1064, %rsp
.cfi_def_cfa_offset 1120
movq %fs:40, %rax
movq %rax, 1048(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rdi
call cudaGetDeviceCount@PLT
movl 12(%rsp), %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %ebp
leaq .LC1(%rip), %rbx
leaq 16(%rsp), %r12
leaq .LC3(%rip), %r15
leaq .LC4(%rip), %r14
leaq .LC5(%rip), %r13
cmpl $0, 12(%rsp)
jg .L4
.L3:
movq 1048(%rsp), %rax
subq %fs:40, %rax
jne .L14
addq $1064, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
movq %r12, %rcx
movq %rbx, %rdx
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 336(%rsp), %ecx
movq %rbx, %rdx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %ebp
movl 12(%rsp), %eax
cmpl %ebp, %eax
jle .L3
testl %eax, %eax
leaq .LC1(%rip), %rbx
leaq .LC0(%rip), %rax
cmove %rax, %rbx
.L4:
movl %ebp, %esi
movq %r12, %rdi
call cudaGetDeviceProperties_v2@PLT
cmpl $0, 12(%rsp)
jle .L7
movl %ebp, %edx
movq %r15, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L7
.L14:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z4miscv, .-_Z4miscv
.globl _Z28__device_stub__Z5saxpyjfPfS_jfPfS_
.type _Z28__device_stub__Z5saxpyjfPfS_jfPfS_, @function
_Z28__device_stub__Z5saxpyjfPfS_jfPfS_:
.LFB2083:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movss %xmm0, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L19
.L15:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L20
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z5saxpyjfPfS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L15
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z28__device_stub__Z5saxpyjfPfS_jfPfS_, .-_Z28__device_stub__Z5saxpyjfPfS_jfPfS_
.globl _Z5saxpyjfPfS_
.type _Z5saxpyjfPfS_, @function
_Z5saxpyjfPfS_:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z28__device_stub__Z5saxpyjfPfS_jfPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z5saxpyjfPfS_, .-_Z5saxpyjfPfS_
.section .rodata.str1.8
.align 8
.LC12:
.string "ERROR: i=%d, a = %s, x[i]=%f, y[i]=%f, yy[i]=%f\n"
.section .rodata.str1.1
.LC13:
.string "The results match!\n"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $56, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r12
movl %eax, %r15d
call _Z4miscv
leal 0(,%r12,4), %r14d
movslq %r14d, %r14
movq %r14, %rdi
call malloc@PLT
movq %rax, %rbp
movq %r14, %rdi
call malloc@PLT
movq %rax, %rbx
movq %r14, %rdi
call malloc@PLT
movq %rax, %r13
testl %r12d, %r12d
je .L24
movl %r12d, %ecx
movl $0, %eax
.L25:
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, 0(%rbp,%rax,4)
movl %eax, %edx
imull %eax, %edx
pxor %xmm0, %xmm0
cvtsi2ssl %edx, %xmm0
movss %xmm0, (%rbx,%rax,4)
addq $1, %rax
cmpq %rcx, %rax
jne .L25
.L24:
movq %rsp, %rdi
movq %r14, %rsi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %r14, %rdx
movq %rbp, %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %r14, %rdx
movq %rbx, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $256, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl %r12d, %eax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
mulsd .LC6(%rip), %xmm0
movapd %xmm0, %xmm3
movsd .LC14(%rip), %xmm2
movapd %xmm0, %xmm1
andpd %xmm2, %xmm1
movsd .LC7(%rip), %xmm4
ucomisd %xmm1, %xmm4
jbe .L28
cvttsd2siq %xmm0, %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
cmpnlesd %xmm1, %xmm3
movsd .LC9(%rip), %xmm4
andpd %xmm4, %xmm3
addsd %xmm1, %xmm3
andnpd %xmm0, %xmm2
orpd %xmm2, %xmm3
.L28:
cvttsd2siq %xmm3, %rax
movl %eax, 16(%rsp)
movl $1, 20(%rsp)
movl 36(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L39
.L29:
movl $2, %ecx
movq %r14, %rdx
movq 8(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
testl %r12d, %r12d
je .L30
movl %r12d, %r12d
movl $0, %ecx
movss .LC10(%rip), %xmm4
.L33:
movss 0(%r13,%rcx,4), %xmm3
movss 0(%rbp,%rcx,4), %xmm1
movss (%rbx,%rcx,4), %xmm2
movaps %xmm1, %xmm0
mulss %xmm4, %xmm0
addss %xmm2, %xmm0
ucomiss %xmm0, %xmm3
jp .L35
jne .L35
addq $1, %rcx
cmpq %r12, %rcx
jne .L33
.L30:
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movl $0, %edi
call exit@PLT
.L39:
movq 8(%rsp), %rdx
movq (%rsp), %rsi
movss .LC10(%rip), %xmm0
movl %r15d, %edi
call _Z28__device_stub__Z5saxpyjfPfS_jfPfS_
jmp .L29
.L35:
cvtss2sd %xmm3, %xmm3
cvtss2sd %xmm2, %xmm2
cvtss2sd %xmm1, %xmm1
movsd .LC11(%rip), %xmm0
leaq .LC12(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $4, %eax
call __fprintf_chk@PLT
movl $-1, %edi
call exit@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.1
.LC15:
.string "_Z5saxpyjfPfS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _Z5saxpyjfPfS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC6:
.long 0
.long 1064304640
.align 8
.LC7:
.long 0
.long 1127219200
.align 8
.LC9:
.long 0
.long 1072693248
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC10:
.long 1077936128
.section .rodata.cst8
.align 8
.LC11:
.long 0
.long 1074266112
.align 8
.LC14:
.long -1
.long 2147483647
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "saxpy.hip"
.globl _Z20__device_stub__saxpyjfPfS_ # -- Begin function _Z20__device_stub__saxpyjfPfS_
.p2align 4, 0x90
.type _Z20__device_stub__saxpyjfPfS_,@function
_Z20__device_stub__saxpyjfPfS_: # @_Z20__device_stub__saxpyjfPfS_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movss %xmm0, 8(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z5saxpyjfPfS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z20__device_stub__saxpyjfPfS_, .Lfunc_end0-_Z20__device_stub__saxpyjfPfS_
.cfi_endproc
# -- End function
.globl _Z4miscv # -- Begin function _Z4miscv
.p2align 4, 0x90
.type _Z4miscv,@function
_Z4miscv: # @_Z4miscv
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $1488, %rsp # imm = 0x5D0
.cfi_def_cfa_offset 1520
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %rbp, -16
leaq 12(%rsp), %rdi
callq hipGetDeviceCount
movl 12(%rsp), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
cmpl $0, 12(%rsp)
jle .LBB1_7
# %bb.1: # %.lr.ph
movl $1, %eax
leaq 16(%rsp), %rbx
xorl %ebp, %ebp
jmp .LBB1_2
.p2align 4, 0x90
.LBB1_6: # in Loop: Header=BB1_2 Depth=1
movl $.L.str.4, %edi
movq %r14, %rsi
movq %rbx, %rdx
xorl %eax, %eax
callq printf
movl 336(%rsp), %edx
movl $.L.str.5, %edi
movq %r14, %rsi
xorl %eax, %eax
callq printf
incl %ebp
movl 12(%rsp), %eax
cmpl %eax, %ebp
jge .LBB1_7
.LBB1_2: # =>This Inner Loop Header: Depth=1
movl $.L.str.1, %r14d
testl %eax, %eax
je .LBB1_4
# %bb.3: # in Loop: Header=BB1_2 Depth=1
movl $.L.str.2, %r14d
.LBB1_4: # in Loop: Header=BB1_2 Depth=1
movq %rbx, %rdi
movl %ebp, %esi
callq hipGetDevicePropertiesR0600
cmpl $0, 12(%rsp)
jle .LBB1_6
# %bb.5: # in Loop: Header=BB1_2 Depth=1
movl $.L.str.3, %edi
movl %ebp, %esi
xorl %eax, %eax
callq printf
jmp .LBB1_6
.LBB1_7: # %._crit_edge
addq $1488, %rsp # imm = 0x5D0
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z4miscv, .Lfunc_end1-_Z4miscv
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI2_0:
.quad 0x3f70000000000000 # double 0.00390625
.LCPI2_2:
.quad 0x4008000000000000 # double 3
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0
.LCPI2_1:
.long 0x40400000 # float 3
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $128, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq 8(%rsi), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r12
leal (,%rax,4), %ebx
callq _Z4miscv
movslq %ebx, %r13
movq %r13, %rdi
callq malloc
movq %rax, %rbx
movq %r13, %rdi
callq malloc
movq %rax, %r14
movq %r13, %rdi
callq malloc
movq %rax, %r15
testl %r12d, %r12d
je .LBB2_3
# %bb.1: # %.lr.ph.preheader
movl %r12d, %eax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB2_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2ss %ecx, %xmm0
movl %ecx, %edx
imull %ecx, %edx
xorps %xmm1, %xmm1
cvtsi2ss %edx, %xmm1
movss %xmm0, (%rbx,%rcx,4)
movss %xmm1, (%r14,%rcx,4)
incq %rcx
cmpq %rcx, %rax
jne .LBB2_2
.LBB2_3: # %._crit_edge
leaq 16(%rsp), %rdi
movq %r13, %rsi
callq hipMalloc
leaq 8(%rsp), %rdi
movq %r13, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movq %rbx, %rsi
movq %r13, %rdx
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movq %r14, %rsi
movq %r13, %rdx
movl $1, %ecx
callq hipMemcpy
movl %r12d, %eax
xorps %xmm0, %xmm0
cvtsi2sd %rax, %xmm0
mulsd .LCPI2_0(%rip), %xmm0
callq ceil@PLT
cvttsd2si %xmm0, %rax
movl %eax, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $256, %rdx # imm = 0x100
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_5
# %bb.4:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movl %r12d, 28(%rsp)
movl $1077936128, 24(%rsp) # imm = 0x40400000
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 88(%rsp), %rax
movq %rax, 112(%rsp)
leaq 80(%rsp), %rax
movq %rax, 120(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z5saxpyjfPfS_, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_5:
movq 8(%rsp), %rsi
movq %r15, %rdi
movq %r13, %rdx
movl $2, %ecx
callq hipMemcpy
testl %r12d, %r12d
je .LBB2_9
# %bb.6: # %.lr.ph60.preheader
movl %r12d, %eax
xorl %edx, %edx
movss .LCPI2_1(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
.p2align 4, 0x90
.LBB2_7: # %.lr.ph60
# =>This Inner Loop Header: Depth=1
movss (%r15,%rdx,4), %xmm3 # xmm3 = mem[0],zero,zero,zero
movss (%rbx,%rdx,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
movaps %xmm1, %xmm4
mulss %xmm0, %xmm4
movss (%r14,%rdx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
addss %xmm2, %xmm4
ucomiss %xmm4, %xmm3
jne .LBB2_10
jp .LBB2_10
# %bb.8: # in Loop: Header=BB2_7 Depth=1
incq %rdx
cmpq %rdx, %rax
jne .LBB2_7
.LBB2_9: # %._crit_edge61
movl $.Lstr, %edi
callq puts@PLT
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
xorl %edi, %edi
callq exit
.LBB2_10:
movq stderr(%rip), %rdi
cvtss2sd %xmm1, %xmm1
cvtss2sd %xmm2, %xmm2
cvtss2sd %xmm3, %xmm3
movsd .LCPI2_2(%rip), %xmm0 # xmm0 = mem[0],zero
movl $.L.str.6, %esi
# kill: def $edx killed $edx killed $rdx
movb $4, %al
callq fprintf
movl $-1, %edi
callq exit
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z5saxpyjfPfS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z5saxpyjfPfS_,@object # @_Z5saxpyjfPfS_
.section .rodata,"a",@progbits
.globl _Z5saxpyjfPfS_
.p2align 3, 0x0
_Z5saxpyjfPfS_:
.quad _Z20__device_stub__saxpyjfPfS_
.size _Z5saxpyjfPfS_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "This machine has %d CUDA devices.\n"
.size .L.str, 35
.type .L.str.1,@object # @.str.1
.L.str.1:
.zero 1
.size .L.str.1, 1
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz " "
.size .L.str.2, 3
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Device %d:\n"
.size .L.str.3, 12
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "%sdevice.name = %s\n"
.size .L.str.4, 20
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "%sdevice.maxThreadsPerBlock = %d\n"
.size .L.str.5, 34
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "ERROR: i=%d, a = %s, x[i]=%f, y[i]=%f, yy[i]=%f\n"
.size .L.str.6, 49
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z5saxpyjfPfS_"
.size .L__unnamed_1, 15
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "The results match!"
.size .Lstr, 19
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z20__device_stub__saxpyjfPfS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z5saxpyjfPfS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
/*
* CUDA accelerated coulombic potential grid test code
* John E. Stone and Chris Rodrigues
* http://www.ks.uiuc.edu/~johns/
*/
#include <stdio.h>
#include <stdlib.h>
// Check-and-clear CUDA error macro: reads (and resets) the last runtime
// error; on failure prints the error string with the invoking source line
// and makes the ENCLOSING function return -1 — so it is only usable inside
// functions whose return type can accept -1 (here, int).
#define CUERR { cudaError_t err; \
 if ((err = cudaGetLastError()) != cudaSuccess) { \
 printf("CUDA error: %s, line %d\n", cudaGetErrorString(err), __LINE__); \
 return -1; }}
// max constant buffer size is 64KB, minus whatever
// the CUDA runtime and compiler are using that we don't know about
// At 16 bytes for atom, for this program 4070 atoms is about the max
// we can store in the constant buffer.
#define MAXATOMS 4000
__constant__ float4 atominfo[MAXATOMS];
#define UNROLLX 8
#define UNROLLY 1
#define BLOCKSIZEX 16
#define BLOCKSIZEY 16
#define BLOCKSIZE BLOCKSIZEX * BLOCKSIZEY
// This kernel calculates coulombic potential at each grid point and
// stores the results in the output array.
//
// This kernel was written by Chris Rodrigues of Wen-mei's group
//
// Kernel: accumulates the Coulombic potential of `numatoms` point charges
// onto a 2D slice of the energy grid.
//
// Launch layout (expected): 2D grid of BLOCKSIZEX x BLOCKSIZEY thread
// blocks; each thread handles UNROLLX (8) grid points along x, spaced
// BLOCKSIZEX points apart, so the per-atom y/z term is computed once and
// reused across all eight x-points (ILP / FLOP reuse).
//
// Atom data comes from the __constant__ array `atominfo`, staged on the
// host by copyatomstoconstbuf(): .x/.y are atom coordinates, .z holds the
// PRE-SQUARED z-distance to this slice, .w the charge. Constant memory
// broadcasts efficiently here because all threads read the same element
// each iteration.
//
// NOTE(review): there is no bounds guard on outaddr — assumes the caller
// sizes/pads energygrid to exactly cover the launched grid; confirm at
// call sites.
//
// This kernel was written by Chris Rodrigues of Wen-mei's group
//
__global__ void cenergy(int numatoms, float gridspacing, float * energygrid) {
  // First of the UNROLLX x-points owned by this thread.
  unsigned int xindex = __umul24(blockIdx.x, blockDim.x) * UNROLLX
                        + threadIdx.x;
  unsigned int yindex = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
  // Row-major address; the row pitch is the full unrolled grid width.
  unsigned int outaddr = (__umul24(gridDim.x, blockDim.x) * UNROLLX) * yindex
                         + xindex;

  // Physical coordinates of this thread's row and first column.
  float coory = gridspacing * yindex;
  float coorx = gridspacing * xindex;

  // One running sum per unrolled x-point.
  float energyvalx1=0.0f;
  float energyvalx2=0.0f;
  float energyvalx3=0.0f;
  float energyvalx4=0.0f;
#if UNROLLX == 8
  float energyvalx5=0.0f;
  float energyvalx6=0.0f;
  float energyvalx7=0.0f;
  float energyvalx8=0.0f;
#endif
  // Physical spacing between consecutive unrolled x-points
  // (BLOCKSIZEX grid points apart).
  float gridspacing_u = gridspacing * BLOCKSIZEX;

  //
  // XXX 59/8 FLOPS per atom
  //
  int atomid;
  for (atomid=0; atomid<numatoms; atomid++) {
    // dy^2 + dz^2 is shared by all eight x-points (atominfo[].z is dz^2,
    // pre-squared on the host).
    float dy = coory - atominfo[atomid].y;
    float dyz2 = (dy * dy) + atominfo[atomid].z;
    float atomq=atominfo[atomid].w;

    // x-distances for the eight points, built incrementally by adding the
    // constant stride rather than recomputing each from scratch.
    float dx1 = coorx - atominfo[atomid].x;
    float dx2 = dx1 + gridspacing_u;
    float dx3 = dx2 + gridspacing_u;
    float dx4 = dx3 + gridspacing_u;
#if UNROLLX == 8
    float dx5 = dx4 + gridspacing_u;
    float dx6 = dx5 + gridspacing_u;
    float dx7 = dx6 + gridspacing_u;
    float dx8 = dx7 + gridspacing_u;
#endif

    // q / r via fast reciprocal square root (rsqrtf trades ulps for speed).
    energyvalx1 += atomq * rsqrtf(dx1*dx1 + dyz2);
    energyvalx2 += atomq * rsqrtf(dx2*dx2 + dyz2);
    energyvalx3 += atomq * rsqrtf(dx3*dx3 + dyz2);
    energyvalx4 += atomq * rsqrtf(dx4*dx4 + dyz2);
#if UNROLLX == 8
    energyvalx5 += atomq * rsqrtf(dx5*dx5 + dyz2);
    energyvalx6 += atomq * rsqrtf(dx6*dx6 + dyz2);
    energyvalx7 += atomq * rsqrtf(dx7*dx7 + dyz2);
    energyvalx8 += atomq * rsqrtf(dx8*dx8 + dyz2);
#endif
  }

  // Accumulate (not overwrite) into the output so multiple z-slices /
  // atom batches can be summed across kernel launches.
  energygrid[outaddr              ] += energyvalx1;
  energygrid[outaddr+1*BLOCKSIZEX] += energyvalx2;
  energygrid[outaddr+2*BLOCKSIZEX] += energyvalx3;
  energygrid[outaddr+3*BLOCKSIZEX] += energyvalx4;
#if UNROLLX == 8
  energygrid[outaddr+4*BLOCKSIZEX] += energyvalx5;
  energygrid[outaddr+5*BLOCKSIZEX] += energyvalx6;
  energygrid[outaddr+6*BLOCKSIZEX] += energyvalx7;
  energygrid[outaddr+7*BLOCKSIZEX] += energyvalx8;
#endif
}
// Stage up to MAXATOMS atoms into the __constant__ array `atominfo`.
//
// Input layout: atoms[] holds 4 floats per atom (x, y, z, charge).
// For each atom the z coordinate is replaced by the squared distance
// (zplane - z)^2, so the cenergy kernel can reuse it directly as dz^2.
//
// Returns 0 on success, -1 on a pending/new CUDA error, an oversized
// atom count, or allocation failure.
int copyatomstoconstbuf(float *atoms, int count, float zplane) {
  CUERR // check and clear any existing errors
  if (count > MAXATOMS) {
    printf("Atom count exceeds constant buffer storage capacity\n");
    return -1;
  }

  // Stage on the heap: 4*MAXATOMS floats is ~64 KB, which as an automatic
  // array risks overflowing the host stack.
  float *atompre = (float *) malloc(count * 4 * sizeof(float));
  if (atompre == NULL) {
    printf("Failed to allocate atom staging buffer\n");
    return -1;
  }

  int i;
  for (i=0; i<count*4; i+=4) {
    atompre[i    ] = atoms[i    ];
    atompre[i + 1] = atoms[i + 1];
    float dz = zplane - atoms[i + 2];  // precompute dz^2 for the kernel
    atompre[i + 2] = dz*dz;
    atompre[i + 3] = atoms[i + 3];
  }

  // Default direction (host -> device) applies; offset 0 into the symbol.
  cudaMemcpyToSymbol(atominfo, atompre, count * 4 * sizeof(float), 0);
  free(atompre);
  CUERR // check and clear any existing errors

  return 0;
}
// Allocate and fill `count` random atoms (4 floats each: x, y, z, charge).
// Coordinates are uniform over the physical volume gridspacing * volsize
// along each axis; charges are uniform in [-1, 1].
//
// On success *atombuf points at the malloc'd buffer (caller frees) and 0
// is returned; on allocation failure *atombuf is NULL and -1 is returned.
int initatoms(float **atombuf, int count, dim3 volsize, float gridspacing) {
  dim3 size;
  int i;
  float *atoms;

  atoms = (float *) malloc(count * 4 * sizeof(float));
  if (atoms == NULL) {
    *atombuf = NULL;
    return -1;
  }
  *atombuf = atoms;

  // compute grid dimensions in angstroms
  // BUG FIX: the cast must apply to the product, not to gridspacing alone.
  // "(unsigned int) gridspacing * volsize.x" truncates a sub-1.0 spacing
  // to 0 first, collapsing every dimension (and all atom coords) to zero.
  size.x = (unsigned int) (gridspacing * volsize.x);
  size.y = (unsigned int) (gridspacing * volsize.y);
  size.z = (unsigned int) (gridspacing * volsize.z);

  for (i=0; i<count; i++) {
    int addr = i * 4;
    atoms[addr    ] = (rand() / (float) RAND_MAX) * size.x;
    atoms[addr + 1] = (rand() / (float) RAND_MAX) * size.y;
    atoms[addr + 2] = (rand() / (float) RAND_MAX) * size.z;
    atoms[addr + 3] = ((rand() / (float) RAND_MAX) * 2.0) - 1.0;  // charge
  }

  return 0;
}
} | code for sm_80
Function : _Z7cenergyifPf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2UR UR4, SR_CTAID.X ; /* 0x00000000000479c3 */
/* 0x000e220000002500 */
/*0020*/ S2R R3, SR_TID.Y ; /* 0x0000000000037919 */
/* 0x000e620000002200 */
/*0030*/ MOV R5, 0xffffff ; /* 0x00ffffff00057802 */
/* 0x000fe20000000f00 */
/*0040*/ ULDC.64 UR10, c[0x0][0x118] ; /* 0x00004600000a7ab9 */
/* 0x000fe20000000a00 */
/*0050*/ MOV R0, c[0x0][0x0] ; /* 0x0000000000007a02 */
/* 0x000fe20000000f00 */
/*0060*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000ea20000002100 */
/*0070*/ MOV R11, c[0x0][0x160] ; /* 0x00005800000b7a02 */
/* 0x000fe20000000f00 */
/*0080*/ HFMA2.MMA R18, -RZ, RZ, 0, 0 ; /* 0x00000000ff127435 */
/* 0x000fe200000001ff */
/*0090*/ S2UR UR5, SR_CTAID.Y ; /* 0x00000000000579c3 */
/* 0x000ee20000002600 */
/*00a0*/ LOP3.LUT R2, R5.reuse, c[0x0][0x4], RZ, 0xc0, !PT ; /* 0x0000010005027a12 */
/* 0x040fe200078ec0ff */
/*00b0*/ CS2R R14, SRZ ; /* 0x00000000000e7805 */
/* 0x000fe2000001ff00 */
/*00c0*/ LOP3.LUT R5, R5, c[0x0][0xc], RZ, 0xc0, !PT ; /* 0x0000030005057a12 */
/* 0x000fe200078ec0ff */
/*00d0*/ CS2R R12, SRZ ; /* 0x00000000000c7805 */
/* 0x000fe2000001ff00 */
/*00e0*/ LOP3.LUT R0, R0, 0xffffff, RZ, 0xc0, !PT ; /* 0x00ffffff00007812 */
/* 0x000fc400078ec0ff */
/*00f0*/ ISETP.GE.AND P0, PT, R11, 0x1, PT ; /* 0x000000010b00780c */
/* 0x000fe40003f06270 */
/*0100*/ MOV R17, RZ ; /* 0x000000ff00117202 */
/* 0x000fe20000000f00 */
/*0110*/ IMAD R5, R0, R5, RZ ; /* 0x0000000500057224 */
/* 0x000fca00078e02ff */
/*0120*/ SHF.L.U32 R5, R5, 0x3, RZ ; /* 0x0000000305057819 */
/* 0x000fe200000006ff */
/*0130*/ USHF.L.U32 UR4, UR4, 0x3, URZ ; /* 0x0000000304047899 */
/* 0x001fc8000800063f */
/*0140*/ ULOP3.LUT UR4, UR4, 0x7fffff8, URZ, 0xc0, !UPT ; /* 0x07fffff804047892 */
/* 0x000fe4000f8ec03f */
/*0150*/ ULOP3.LUT UR5, UR5, 0xffffff, URZ, 0xc0, !UPT ; /* 0x00ffffff05057892 */
/* 0x008fc8000f8ec03f */
/*0160*/ IMAD R0, R0, UR4, R7 ; /* 0x0000000400007c24 */
/* 0x004fe4000f8e0207 */
/*0170*/ CS2R R6, SRZ ; /* 0x0000000000067805 */
/* 0x000fe2000001ff00 */
/*0180*/ IMAD R3, R2, UR5, R3 ; /* 0x0000000502037c24 */
/* 0x002fc8000f8e0203 */
/*0190*/ IMAD R8, R3, R5, R0 ; /* 0x0000000503087224 */
/* 0x000fe200078e0200 */
/*01a0*/ @!P0 BRA 0xea0 ; /* 0x00000cf000008947 */
/* 0x000fea0003800000 */
/*01b0*/ ISETP.NE.AND P0, PT, R11.reuse, 0x1, PT ; /* 0x000000010b00780c */
/* 0x040fe20003f05270 */
/*01c0*/ I2F.U32 R10, R3 ; /* 0x00000003000a7306 */
/* 0x0000620000201000 */
/*01d0*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fe20008000000 */
/*01e0*/ LOP3.LUT R11, R11, 0x1, RZ, 0xc0, !PT ; /* 0x000000010b0b7812 */
/* 0x000fe200078ec0ff */
/*01f0*/ CS2R R6, SRZ ; /* 0x0000000000067805 */
/* 0x000fe2000001ff00 */
/*0200*/ CS2R R12, SRZ ; /* 0x00000000000c7805 */
/* 0x000fe2000001ff00 */
/*0210*/ CS2R R14, SRZ ; /* 0x00000000000e7805 */
/* 0x000fe2000001ff00 */
/*0220*/ MOV R17, RZ ; /* 0x000000ff00117202 */
/* 0x000fe40000000f00 */
/*0230*/ I2F.U32 R9, R0 ; /* 0x0000000000097306 */
/* 0x0000a20000201000 */
/*0240*/ MOV R18, RZ ; /* 0x000000ff00127202 */
/* 0x000fc60000000f00 */
/*0250*/ @!P0 BRA 0xa90 ; /* 0x0000083000008947 */
/* 0x000fea0003800000 */
/*0260*/ HFMA2.MMA R7, -RZ, RZ, 0, 0 ; /* 0x00000000ff077435 */
/* 0x000fe200000001ff */
/*0270*/ IADD3 R0, -R11, c[0x0][0x160], RZ ; /* 0x000058000b007a10 */
/* 0x001fe20007ffe1ff */
/*0280*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fe40008000000 */
/*0290*/ USHF.L.U32 UR5, UR4, 0x4, URZ ; /* 0x0000000404057899 */
/* 0x000fe2000800063f */
/*02a0*/ MOV R16, c[0x0][0x164] ; /* 0x0000590000107a02 */
/* 0x000fe20000000f00 */
/*02b0*/ UIADD3 UR4, UR4, 0x2, URZ ; /* 0x0000000204047890 */
/* 0x000fe2000fffe03f */
/*02c0*/ IADD3 R0, R0, -0x2, RZ ; /* 0xfffffffe00007810 */
/* 0x000fc60007ffe0ff */
/*02d0*/ MOV R20, UR5 ; /* 0x0000000500147c02 */
/* 0x000fe40008000f00 */
/*02e0*/ MOV R22, UR5 ; /* 0x0000000500167c02 */
/* 0x000fe40008000f00 */
/*02f0*/ LDC.64 R4, c[0x3][R20] ; /* 0x00c0000014047b82 */
/* 0x000e240000000a00 */
/*0300*/ ULDC.64 UR8, c[0x3][UR5+0x8] ; /* 0x00c0020005087abb */
/* 0x000fe40008000a00 */
/*0310*/ ULDC.64 UR6, c[0x3][UR5+0x18] ; /* 0x00c0060005067abb */
/* 0x000fc80008000a00 */
/*0320*/ LDC.64 R2, c[0x3][R22+0x10] ; /* 0x00c0040016027b82 */
/* 0x000ee20000000a00 */
/*0330*/ FFMA R5, R10, c[0x0][0x164], -R5 ; /* 0x000059000a057a23 */
/* 0x003fe40000000805 */
/*0340*/ FFMA R4, R9, c[0x0][0x164], -R4 ; /* 0x0000590009047a23 */
/* 0x004fe40000000804 */
/*0350*/ FFMA R21, R5, R5, UR8 ; /* 0x0000000805157e23 */
/* 0x000fc80008000005 */
/*0360*/ FFMA R5, R4, R4, R21 ; /* 0x0000000404057223 */
/* 0x000fe40000000015 */
/*0370*/ FFMA R4, R16, 16, R4 ; /* 0x4180000010047823 */
/* 0x000fe40000000004 */
/*0380*/ FFMA R3, R10, c[0x0][0x164], -R3 ; /* 0x000059000a037a23 */
/* 0x008fe20000000803 */
/*0390*/ FSETP.GEU.AND P0, PT, |R5|, 1.175494350822287508e-38, PT ; /* 0x008000000500780b */
/* 0x000fe20003f0e200 */
/*03a0*/ FFMA R26, R4, R4, R21 ; /* 0x00000004041a7223 */
/* 0x000fe40000000015 */
/*03b0*/ FFMA R4, R16, 16, R4 ; /* 0x4180000010047823 */
/* 0x000fe40000000004 */
/*03c0*/ FFMA R19, R3, R3, UR6 ; /* 0x0000000603137e23 */
/* 0x000fe20008000003 */
/*03d0*/ FSETP.GEU.AND P3, PT, |R26|, 1.175494350822287508e-38, PT ; /* 0x008000001a00780b */
/* 0x000fe20003f6e200 */
/*03e0*/ FFMA R3, R16, 16, R4 ; /* 0x4180000010037823 */
/* 0x000fc40000000004 */
/*03f0*/ FFMA R23, R4, R4, R21.reuse ; /* 0x0000000404177223 */
/* 0x100fe40000000015 */
/*0400*/ FFMA R24, R3, R3, R21 ; /* 0x0000000303187223 */
/* 0x000fe40000000015 */
/*0410*/ @!P0 FMUL R5, R5, 16777216 ; /* 0x4b80000005058820 */
/* 0x000fe20000400000 */
/*0420*/ FSETP.GEU.AND P4, PT, |R23|, 1.175494350822287508e-38, PT ; /* 0x008000001700780b */
/* 0x000fe20003f8e200 */
/*0430*/ FFMA R3, R16, 16, R3 ; /* 0x4180000010037823 */
/* 0x000fe20000000003 */
/*0440*/ FSETP.GEU.AND P2, PT, |R24|, 1.175494350822287508e-38, PT ; /* 0x008000001800780b */
/* 0x000fe20003f4e200 */
/*0450*/ MUFU.RSQ R25, R5 ; /* 0x0000000500197308 */
/* 0x000e220000001400 */
/*0460*/ FFMA R2, R9, c[0x0][0x164], -R2 ; /* 0x0000590009027a23 */
/* 0x000fe40000000802 */
/*0470*/ @!P3 FMUL R26, R26, 16777216 ; /* 0x4b8000001a1ab820 */
/* 0x000fc40000400000 */
/*0480*/ FFMA R20, R16, 16, R3 ; /* 0x4180000010147823 */
/* 0x000fe40000000003 */
/*0490*/ FFMA R27, R2, R2, R19.reuse ; /* 0x00000002021b7223 */
/* 0x100fe40000000013 */
/*04a0*/ MUFU.RSQ R26, R26 ; /* 0x0000001a001a7308 */
/* 0x000e620000001400 */
/*04b0*/ @!P4 FMUL R23, R23, 16777216 ; /* 0x4b8000001717c820 */
/* 0x000fe40000400000 */
/*04c0*/ @!P2 FMUL R24, R24, 16777216 ; /* 0x4b8000001818a820 */
/* 0x000fe20000400000 */
/*04d0*/ FSETP.GEU.AND P1, PT, |R27|, 1.175494350822287508e-38, PT ; /* 0x008000001b00780b */
/* 0x000fe20003f2e200 */
/*04e0*/ FFMA R22, R16, 16, R2 ; /* 0x4180000010167823 */
/* 0x000fe40000000002 */
/*04f0*/ @!P0 FMUL R25, R25, 4096 ; /* 0x4580000019198820 */
/* 0x001fe20000400000 */
/*0500*/ MUFU.RSQ R23, R23 ; /* 0x0000001700177308 */
/* 0x000fe20000001400 */
/*0510*/ FFMA R4, R22, R22, R19 ; /* 0x0000001616047223 */
/* 0x000fc40000000013 */
/*0520*/ FFMA R5, R25, UR9, R18 ; /* 0x0000000919057c23 */
/* 0x000fe40008000012 */
/*0530*/ FFMA R25, R16, 16, R20 ; /* 0x4180000010197823 */
/* 0x000fe20000000014 */
/*0540*/ FSETP.GEU.AND P0, PT, |R4|, 1.175494350822287508e-38, PT ; /* 0x008000000400780b */
/* 0x000fe20003f0e200 */
/*0550*/ FFMA R18, R3, R3, R21 ; /* 0x0000000303127223 */
/* 0x000fe20000000015 */
/*0560*/ MUFU.RSQ R24, R24 ; /* 0x0000001800187308 */
/* 0x000e220000001400 */
/*0570*/ @!P3 FMUL R26, R26, 4096 ; /* 0x458000001a1ab820 */
/* 0x002fe40000400000 */
/*0580*/ @!P1 FMUL R27, R27, 16777216 ; /* 0x4b8000001b1b9820 */
/* 0x000fe20000400000 */
/*0590*/ FSETP.GEU.AND P3, PT, |R18|, 1.175494350822287508e-38, PT ; /* 0x008000001200780b */
/* 0x000fe20003f6e200 */
/*05a0*/ FFMA R3, R26, UR9, R17 ; /* 0x000000091a037c23 */
/* 0x000fc40008000011 */
/*05b0*/ FFMA R26, R16, 16, R25 ; /* 0x41800000101a7823 */
/* 0x000fe20000000019 */
/*05c0*/ MUFU.RSQ R2, R27 ; /* 0x0000001b00027308 */
/* 0x000e620000001400 */
/*05d0*/ FFMA R17, R20, R20, R21.reuse ; /* 0x0000001414117223 */
/* 0x100fe40000000015 */
/*05e0*/ FFMA R20, R25, R25, R21.reuse ; /* 0x0000001919147223 */
/* 0x100fe40000000015 */
/*05f0*/ FFMA R21, R26, R26, R21 ; /* 0x0000001a1a157223 */
/* 0x000fe20000000015 */
/*0600*/ FSETP.GEU.AND P6, PT, |R17|, 1.175494350822287508e-38, PT ; /* 0x008000001100780b */
/* 0x000fe20003fce200 */
/*0610*/ FFMA R25, R16, 16, R22 ; /* 0x4180000010197823 */
/* 0x000fe20000000016 */
/*0620*/ FSETP.GEU.AND P5, PT, |R20|, 1.175494350822287508e-38, PT ; /* 0x008000001400780b */
/* 0x000fe20003fae200 */
/*0630*/ @!P2 FMUL R24, R24, 4096 ; /* 0x458000001818a820 */
/* 0x001fe20000400000 */
/*0640*/ FSETP.GEU.AND P2, PT, |R21|, 1.175494350822287508e-38, PT ; /* 0x008000001500780b */
/* 0x000fe20003f4e200 */
/*0650*/ FFMA R22, R25, R25, R19 ; /* 0x0000001919167223 */
/* 0x000fc40000000013 */
/*0660*/ FFMA R25, R16, 16, R25 ; /* 0x4180000010197823 */
/* 0x000fe40000000019 */
/*0670*/ FFMA R15, R24, UR9, R15 ; /* 0x00000009180f7c23 */
/* 0x000fe4000800000f */
/*0680*/ @!P3 FMUL R18, R18, 16777216 ; /* 0x4b8000001212b820 */
/* 0x000fe40000400000 */
/*0690*/ FFMA R24, R16, 16, R25 ; /* 0x4180000010187823 */
/* 0x000fe40000000019 */
/*06a0*/ @!P4 FMUL R23, R23, 4096 ; /* 0x458000001717c820 */
/* 0x000fe20000400000 */
/*06b0*/ FSETP.GEU.AND P4, PT, |R22|, 1.175494350822287508e-38, PT ; /* 0x008000001600780b */
/* 0x000fe20003f8e200 */
/*06c0*/ @!P6 FMUL R17, R17, 16777216 ; /* 0x4b8000001111e820 */
/* 0x000fe20000400000 */
/*06d0*/ MUFU.RSQ R18, R18 ; /* 0x0000001200127308 */
/* 0x000e220000001400 */
/*06e0*/ @!P2 FMUL R21, R21, 16777216 ; /* 0x4b8000001515a820 */
/* 0x000fc40000400000 */
/*06f0*/ @!P5 FMUL R20, R20, 16777216 ; /* 0x4b8000001414d820 */
/* 0x000fe40000400000 */
/*0700*/ FFMA R28, R24, R24, R19.reuse ; /* 0x00000018181c7223 */
/* 0x100fe40000000013 */
/*0710*/ FFMA R29, R16, 16, R24 ; /* 0x41800000101d7823 */
/* 0x000fe20000000018 */
/*0720*/ MUFU.RSQ R17, R17 ; /* 0x0000001100117308 */
/* 0x000ea20000001400 */
/*0730*/ FFMA R14, R23, UR9, R14 ; /* 0x00000009170e7c23 */
/* 0x000fe4000800000e */
/*0740*/ FFMA R26, R25, R25, R19 ; /* 0x00000019191a7223 */
/* 0x000fe40000000013 */
/*0750*/ @!P1 FMUL R2, R2, 4096 ; /* 0x4580000002029820 */
/* 0x002fc40000400000 */
/*0760*/ @!P0 FMUL R4, R4, 16777216 ; /* 0x4b80000004048820 */
/* 0x000fe20000400000 */
/*0770*/ MUFU.RSQ R24, R21 ; /* 0x0000001500187308 */
/* 0x000e620000001400 */
/*0780*/ @!P3 FMUL R18, R18, 4096 ; /* 0x458000001212b820 */
/* 0x001fe20000400000 */
/*0790*/ FSETP.GEU.AND P3, PT, |R26|, 1.175494350822287508e-38, PT ; /* 0x008000001a00780b */
/* 0x000fe20003f6e200 */
/*07a0*/ @!P4 FMUL R22, R22, 16777216 ; /* 0x4b8000001616c820 */
/* 0x000fe40000400000 */
/*07b0*/ FFMA R13, R18, UR9, R13 ; /* 0x00000009120d7c23 */
/* 0x000fe4000800000d */
/*07c0*/ FFMA R18, R2, UR7, R5 ; /* 0x0000000702127c23 */
/* 0x000fe20008000005 */
/*07d0*/ MUFU.RSQ R23, R20 ; /* 0x0000001400177308 */
/* 0x0000e20000001400 */
/*07e0*/ @!P6 FMUL R17, R17, 4096 ; /* 0x458000001111e820 */
/* 0x004fe20000400000 */
/*07f0*/ FSETP.GEU.AND P6, PT, |R28|, 1.175494350822287508e-38, PT ; /* 0x008000001c00780b */
/* 0x000fc60003fce200 */
/*0800*/ FFMA R6, R17, UR9, R6 ; /* 0x0000000911067c23 */
/* 0x000fe40008000006 */
/*0810*/ @!P3 FMUL R26, R26, 16777216 ; /* 0x4b8000001a1ab820 */
/* 0x000fe20000400000 */
/*0820*/ MUFU.RSQ R4, R4 ; /* 0x0000000400047308 */
/* 0x000ea20000001400 */
/*0830*/ FFMA R20, R16.reuse, 16, R29 ; /* 0x4180000010147823 */
/* 0x041fe4000000001d */
/*0840*/ FFMA R29, R29, R29, R19.reuse ; /* 0x0000001d1d1d7223 */
/* 0x100fe40000000013 */
/*0850*/ FFMA R16, R16, 16, R20 ; /* 0x4180000010107823 */
/* 0x000fe40000000014 */
/*0860*/ FFMA R20, R20, R20, R19.reuse ; /* 0x0000001414147223 */
/* 0x100fe20000000013 */
/*0870*/ MUFU.RSQ R25, R22 ; /* 0x0000001600197308 */
/* 0x000e220000001400 */
/*0880*/ FFMA R27, R16, R16, R19 ; /* 0x00000010101b7223 */
/* 0x000fc40000000013 */
/*0890*/ @!P2 FMUL R24, R24, 4096 ; /* 0x458000001818a820 */
/* 0x002fe20000400000 */
/*08a0*/ FSETP.GEU.AND P2, PT, |R20|, 1.175494350822287508e-38, PT ; /* 0x008000001400780b */
/* 0x000fe20003f4e200 */
/*08b0*/ @!P5 FMUL R23, R23, 4096 ; /* 0x458000001717d820 */
/* 0x008fe20000400000 */
/*08c0*/ FSETP.GEU.AND P1, PT, |R27|, 1.175494350822287508e-38, PT ; /* 0x008000001b00780b */
/* 0x000fe20003f2e200 */
/*08d0*/ @!P6 FMUL R28, R28, 16777216 ; /* 0x4b8000001c1ce820 */
/* 0x000fe20000400000 */
/*08e0*/ FSETP.GEU.AND P5, PT, |R29|, 1.175494350822287508e-38, PT ; /* 0x008000001d00780b */
/* 0x000fe20003fae200 */
/*08f0*/ MUFU.RSQ R26, R26 ; /* 0x0000001a001a7308 */
/* 0x000e620000001400 */
/*0900*/ @!P0 FMUL R4, R4, 4096 ; /* 0x4580000004048820 */
/* 0x004fe20000400000 */
/*0910*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fe20003f05270 */
/*0920*/ FFMA R23, R23, UR9, R12 ; /* 0x0000000917177c23 */
/* 0x000fe4000800000c */
/*0930*/ FFMA R24, R24, UR9, R7 ; /* 0x0000000918187c23 */
/* 0x000fc40008000007 */
/*0940*/ @!P4 FMUL R25, R25, 4096 ; /* 0x458000001919c820 */
/* 0x001fe20000400000 */
/*0950*/ MUFU.RSQ R16, R28 ; /* 0x0000001c00107308 */
/* 0x000e220000001400 */
/*0960*/ @!P2 FMUL R20, R20, 16777216 ; /* 0x4b8000001414a820 */
/* 0x000fe40000400000 */
/*0970*/ @!P1 FMUL R27, R27, 16777216 ; /* 0x4b8000001b1b9820 */
/* 0x000fe40000400000 */
/*0980*/ @!P5 FMUL R29, R29, 16777216 ; /* 0x4b8000001d1dd820 */
/* 0x000fe40000400000 */
/*0990*/ FFMA R17, R4, UR7, R3 ; /* 0x0000000704117c23 */
/* 0x000fe20008000003 */
/*09a0*/ MUFU.RSQ R20, R20 ; /* 0x0000001400147308 */
/* 0x000ea20000001400 */
/*09b0*/ @!P3 FMUL R26, R26, 4096 ; /* 0x458000001a1ab820 */
/* 0x002fc40000400000 */
/*09c0*/ FFMA R14, R25, UR7, R14 ; /* 0x00000007190e7c23 */
/* 0x000fe4000800000e */
/*09d0*/ FFMA R15, R26, UR7, R15 ; /* 0x000000071a0f7c23 */
/* 0x000fc6000800000f */
/*09e0*/ MUFU.RSQ R19, R29 ; /* 0x0000001d00137308 */
/* 0x000e620000001400 */
/*09f0*/ @!P6 FMUL R16, R16, 4096 ; /* 0x458000001010e820 */
/* 0x001fc80000400000 */
/*0a00*/ FFMA R13, R16, UR7, R13 ; /* 0x00000007100d7c23 */
/* 0x000fc6000800000d */
/*0a10*/ MUFU.RSQ R27, R27 ; /* 0x0000001b001b7308 */
/* 0x000e220000001400 */
/*0a20*/ @!P2 FMUL R20, R20, 4096 ; /* 0x458000001414a820 */
/* 0x004fc80000400000 */
/*0a30*/ FFMA R12, R20, UR7, R23 ; /* 0x00000007140c7c23 */
/* 0x000fe40008000017 */
/*0a40*/ @!P5 FMUL R19, R19, 4096 ; /* 0x458000001313d820 */
/* 0x002fc80000400000 */
/*0a50*/ FFMA R6, R19, UR7, R6 ; /* 0x0000000713067c23 */
/* 0x000fe40008000006 */
/*0a60*/ @!P1 FMUL R27, R27, 4096 ; /* 0x458000001b1b9820 */
/* 0x001fc80000400000 */
/*0a70*/ FFMA R7, R27, UR7, R24 ; /* 0x000000071b077c23 */
/* 0x000fe20008000018 */
/*0a80*/ @P0 BRA 0x290 ; /* 0xfffff80000000947 */
/* 0x000fea000383ffff */
/*0a90*/ ISETP.NE.AND P0, PT, R11, RZ, PT ; /* 0x000000ff0b00720c */
/* 0x000fda0003f05270 */
/*0aa0*/ @!P0 BRA 0xea0 ; /* 0x000003f000008947 */
/* 0x000fea0003800000 */
/*0ab0*/ USHF.L.U32 UR4, UR4, 0x4, URZ ; /* 0x0000000404047899 */
/* 0x000fe2000800063f */
/*0ac0*/ MOV R20, c[0x0][0x164] ; /* 0x0000590000147a02 */
/* 0x000fca0000000f00 */
/*0ad0*/ MOV R5, UR4 ; /* 0x0000000400057c02 */
/* 0x000fc80008000f00 */
/*0ae0*/ LDC.64 R2, c[0x3][R5] ; /* 0x00c0000005027b82 */
/* 0x001e240000000a00 */
/*0af0*/ ULDC.64 UR4, c[0x3][UR4+0x8] ; /* 0x00c0020004047abb */
/* 0x000fe20008000a00 */
/*0b00*/ FFMA R3, R10, c[0x0][0x164], -R3 ; /* 0x000059000a037a23 */
/* 0x003fe40000000803 */
/*0b10*/ FFMA R9, R9, c[0x0][0x164], -R2 ; /* 0x0000590009097a23 */
/* 0x004fe40000000802 */
/*0b20*/ FFMA R0, R3, R3, UR4 ; /* 0x0000000403007e23 */
/* 0x000fc80008000003 */
/*0b30*/ FFMA R3, R9, R9, R0 ; /* 0x0000000909037223 */
/* 0x000fe40000000000 */
/*0b40*/ FFMA R9, R20, 16, R9 ; /* 0x4180000014097823 */
/* 0x000fc60000000009 */
/*0b50*/ FSETP.GEU.AND P6, PT, |R3|, 1.175494350822287508e-38, PT ; /* 0x008000000300780b */
/* 0x000fe20003fce200 */
/*0b60*/ FFMA R2, R9, R9, R0 ; /* 0x0000000909027223 */
/* 0x000fe40000000000 */
/*0b70*/ FFMA R9, R20, 16, R9 ; /* 0x4180000014097823 */
/* 0x000fc60000000009 */
/*0b80*/ FSETP.GEU.AND P5, PT, |R2|, 1.175494350822287508e-38, PT ; /* 0x008000000200780b */
/* 0x000fe20003fae200 */
/*0b90*/ FFMA R4, R9, R9, R0 ; /* 0x0000000909047223 */
/* 0x000fe40000000000 */
/*0ba0*/ FFMA R9, R20, 16, R9 ; /* 0x4180000014097823 */
/* 0x000fc60000000009 */
/*0bb0*/ FSETP.GEU.AND P4, PT, |R4|, 1.175494350822287508e-38, PT ; /* 0x008000000400780b */
/* 0x000fe20003f8e200 */
/*0bc0*/ @!P6 FMUL R3, R3, 16777216 ; /* 0x4b8000000303e820 */
/* 0x000fe40000400000 */
/*0bd0*/ FFMA R10, R9, R9, R0.reuse ; /* 0x00000009090a7223 */
/* 0x100fe40000000000 */
/*0be0*/ FFMA R9, R20, 16, R9 ; /* 0x4180000014097823 */
/* 0x000fe40000000009 */
/*0bf0*/ MUFU.RSQ R3, R3 ; /* 0x0000000300037308 */
/* 0x000e220000001400 */
/*0c00*/ FSETP.GEU.AND P3, PT, |R10|, 1.175494350822287508e-38, PT ; /* 0x008000000a00780b */
/* 0x000fe20003f6e200 */
/*0c10*/ FFMA R16, R9, R9, R0 ; /* 0x0000000909107223 */
/* 0x000fe40000000000 */
/*0c20*/ FFMA R9, R20, 16, R9 ; /* 0x4180000014097823 */
/* 0x000fc40000000009 */
/*0c30*/ @!P5 FMUL R2, R2, 16777216 ; /* 0x4b8000000202d820 */
/* 0x000fe20000400000 */
/*0c40*/ FSETP.GEU.AND P2, PT, |R16|, 1.175494350822287508e-38, PT ; /* 0x008000001000780b */
/* 0x000fe20003f4e200 */
/*0c50*/ FFMA R5, R20.reuse, 16, R9 ; /* 0x4180000014057823 */
/* 0x040fe40000000009 */
/*0c60*/ FFMA R19, R9, R9, R0.reuse ; /* 0x0000000909137223 */
/* 0x100fe40000000000 */
/*0c70*/ FFMA R9, R20, 16, R5 ; /* 0x4180000014097823 */
/* 0x000fe20000000005 */
/*0c80*/ MUFU.RSQ R2, R2 ; /* 0x0000000200027308 */
/* 0x000e620000001400 */
/*0c90*/ FFMA R5, R5, R5, R0.reuse ; /* 0x0000000505057223 */
/* 0x100fe20000000000 */
/*0ca0*/ FSETP.GEU.AND P1, PT, |R19|, 1.175494350822287508e-38, PT ; /* 0x008000001300780b */
/* 0x000fe20003f2e200 */
/*0cb0*/ FFMA R0, R9, R9, R0 ; /* 0x0000000909007223 */
/* 0x000fc40000000000 */
/*0cc0*/ @!P6 FMUL R3, R3, 4096 ; /* 0x458000000303e820 */
/* 0x001fe20000400000 */
/*0cd0*/ FSETP.GEU.AND P0, PT, |R5|, 1.175494350822287508e-38, PT ; /* 0x008000000500780b */
/* 0x000fe20003f0e200 */
/*0ce0*/ @!P3 FMUL R10, R10, 16777216 ; /* 0x4b8000000a0ab820 */
/* 0x000fe20000400000 */
/*0cf0*/ FSETP.GEU.AND P6, PT, |R0|, 1.175494350822287508e-38, PT ; /* 0x008000000000780b */
/* 0x000fe20003fce200 */
/*0d00*/ @!P2 FMUL R16, R16, 16777216 ; /* 0x4b8000001010a820 */
/* 0x000fe40000400000 */
/*0d10*/ @!P4 FMUL R4, R4, 16777216 ; /* 0x4b8000000404c820 */
/* 0x000fe40000400000 */
/*0d20*/ MUFU.RSQ R10, R10 ; /* 0x0000000a000a7308 */
/* 0x000e220000001400 */
/*0d30*/ FFMA R18, R3, UR5, R18 ; /* 0x0000000503127c23 */
/* 0x000fe40008000012 */
/*0d40*/ @!P1 FMUL R19, R19, 16777216 ; /* 0x4b80000013139820 */
/* 0x000fc40000400000 */
/*0d50*/ @!P5 FMUL R2, R2, 4096 ; /* 0x458000000202d820 */
/* 0x002fe40000400000 */
/*0d60*/ @!P0 FMUL R5, R5, 16777216 ; /* 0x4b80000005058820 */
/* 0x000fe20000400000 */
/*0d70*/ MUFU.RSQ R11, R4 ; /* 0x00000004000b7308 */
/* 0x000e620000001400 */
/*0d80*/ @!P6 FMUL R0, R0, 16777216 ; /* 0x4b8000000000e820 */
/* 0x000fe40000400000 */
/*0d90*/ FFMA R17, R2, UR5, R17 ; /* 0x0000000502117c23 */
/* 0x000fca0008000011 */
/*0da0*/ MUFU.RSQ R16, R16 ; /* 0x0000001000107308 */
/* 0x000ea20000001400 */
/*0db0*/ @!P3 FMUL R10, R10, 4096 ; /* 0x458000000a0ab820 */
/* 0x001fc80000400000 */
/*0dc0*/ FFMA R15, R10, UR5, R15 ; /* 0x000000050a0f7c23 */
/* 0x000fc6000800000f */
/*0dd0*/ MUFU.RSQ R19, R19 ; /* 0x0000001300137308 */
/* 0x000e220000001400 */
/*0de0*/ @!P4 FMUL R11, R11, 4096 ; /* 0x458000000b0bc820 */
/* 0x002fc80000400000 */
/*0df0*/ FFMA R14, R11, UR5, R14 ; /* 0x000000050b0e7c23 */
/* 0x000fc6000800000e */
/*0e00*/ MUFU.RSQ R5, R5 ; /* 0x0000000500057308 */
/* 0x000e620000001400 */
/*0e10*/ @!P2 FMUL R16, R16, 4096 ; /* 0x458000001010a820 */
/* 0x004fc80000400000 */
/*0e20*/ FFMA R13, R16, UR5, R13 ; /* 0x00000005100d7c23 */
/* 0x000fc6000800000d */
/*0e30*/ MUFU.RSQ R0, R0 ; /* 0x0000000000007308 */
/* 0x000ea20000001400 */
/*0e40*/ @!P1 FMUL R19, R19, 4096 ; /* 0x4580000013139820 */
/* 0x001fc80000400000 */
/*0e50*/ FFMA R6, R19, UR5, R6 ; /* 0x0000000513067c23 */
/* 0x000fe40008000006 */
/*0e60*/ @!P0 FMUL R5, R5, 4096 ; /* 0x4580000005058820 */
/* 0x002fc80000400000 */
/*0e70*/ FFMA R12, R5, UR5, R12 ; /* 0x00000005050c7c23 */
/* 0x000fe4000800000c */
/*0e80*/ @!P6 FMUL R0, R0, 4096 ; /* 0x458000000000e820 */
/* 0x004fc80000400000 */
/*0e90*/ FFMA R7, R0, UR5, R7 ; /* 0x0000000500077c23 */
/* 0x000fe40008000007 */
/*0ea0*/ MOV R9, 0x4 ; /* 0x0000000400097802 */
/* 0x004fca0000000f00 */
/*0eb0*/ IMAD.WIDE.U32 R2, R8, R9, c[0x0][0x168] ; /* 0x00005a0008027625 */
/* 0x001fca00078e0009 */
/*0ec0*/ LDG.E R5, [R2.64] ; /* 0x0000000a02057981 */
/* 0x000ea2000c1e1900 */
/*0ed0*/ IADD3 R4, R8, 0x10, RZ ; /* 0x0000001008047810 */
/* 0x000fe20007ffe0ff */
/*0ee0*/ FADD R19, R5, R18 ; /* 0x0000001205137221 */
/* 0x004fc80000000000 */
/*0ef0*/ IMAD.WIDE.U32 R4, R4, R9, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x000fe200078e0009 */
/*0f00*/ STG.E [R2.64], R19 ; /* 0x0000001302007986 */
/* 0x0001e8000c10190a */
/*0f10*/ LDG.E R0, [R4.64] ; /* 0x0000000a04007981 */
/* 0x000ea2000c1e1900 */
/*0f20*/ IADD3 R10, R8, 0x20, RZ ; /* 0x00000020080a7810 */
/* 0x002fca0007ffe0ff */
/*0f30*/ IMAD.WIDE.U32 R10, R10, R9, c[0x0][0x168] ; /* 0x00005a000a0a7625 */
/* 0x000fc800078e0009 */
/*0f40*/ FADD R21, R0, R17 ; /* 0x0000001100157221 */
/* 0x004fca0000000000 */
/*0f50*/ STG.E [R4.64], R21 ; /* 0x0000001504007986 */
/* 0x0003e8000c10190a */
/*0f60*/ LDG.E R17, [R10.64] ; /* 0x0000000a0a117981 */
/* 0x000ea2000c1e1900 */
/*0f70*/ IADD3 R16, R8, 0x30, RZ ; /* 0x0000003008107810 */
/* 0x000fe20007ffe0ff */
/*0f80*/ FADD R23, R17, R14 ; /* 0x0000000e11177221 */
/* 0x004fc80000000000 */
/*0f90*/ IMAD.WIDE.U32 R16, R16, R9, c[0x0][0x168] ; /* 0x00005a0010107625 */
/* 0x000fe200078e0009 */
/*0fa0*/ STG.E [R10.64], R23 ; /* 0x000000170a007986 */
/* 0x0005e8000c10190a */
/*0fb0*/ LDG.E R0, [R16.64] ; /* 0x0000000a10007981 */
/* 0x000ee2000c1e1900 */
/*0fc0*/ IADD3 R2, R8, 0x40, RZ ; /* 0x0000004008027810 */
/* 0x001fca0007ffe0ff */
/*0fd0*/ IMAD.WIDE.U32 R2, R2, R9, c[0x0][0x168] ; /* 0x00005a0002027625 */
/* 0x000fc800078e0009 */
/*0fe0*/ FADD R15, R0, R15 ; /* 0x0000000f000f7221 */
/* 0x008fca0000000000 */
/*0ff0*/ STG.E [R16.64], R15 ; /* 0x0000000f10007986 */
/* 0x0001e8000c10190a */
/*1000*/ LDG.E R0, [R2.64] ; /* 0x0000000a02007981 */
/* 0x000ee2000c1e1900 */
/*1010*/ IADD3 R4, R8, 0x50, RZ ; /* 0x0000005008047810 */
/* 0x002fca0007ffe0ff */
/*1020*/ IMAD.WIDE.U32 R4, R4, R9, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x000fc800078e0009 */
/*1030*/ FADD R13, R0, R13 ; /* 0x0000000d000d7221 */
/* 0x008fca0000000000 */
/*1040*/ STG.E [R2.64], R13 ; /* 0x0000000d02007986 */
/* 0x000fe8000c10190a */
/*1050*/ LDG.E R11, [R4.64] ; /* 0x0000000a040b7981 */
/* 0x004ea2000c1e1900 */
/*1060*/ IADD3 R10, R8, 0x60, RZ ; /* 0x00000060080a7810 */
/* 0x000fe20007ffe0ff */
/*1070*/ FADD R19, R11, R6 ; /* 0x000000060b137221 */
/* 0x004fc80000000000 */
/*1080*/ IMAD.WIDE.U32 R10, R10, R9, c[0x0][0x168] ; /* 0x00005a000a0a7625 */
/* 0x000fe200078e0009 */
/*1090*/ STG.E [R4.64], R19 ; /* 0x0000001304007986 */
/* 0x000fe8000c10190a */
/*10a0*/ LDG.E R15, [R10.64] ; /* 0x0000000a0a0f7981 */
/* 0x001ea2000c1e1900 */
/*10b0*/ IADD3 R8, R8, 0x70, RZ ; /* 0x0000007008087810 */
/* 0x000fca0007ffe0ff */
/*10c0*/ IMAD.WIDE.U32 R8, R8, R9, c[0x0][0x168] ; /* 0x00005a0008087625 */
/* 0x000fc800078e0009 */
/*10d0*/ FADD R15, R15, R12 ; /* 0x0000000c0f0f7221 */
/* 0x004fca0000000000 */
/*10e0*/ STG.E [R10.64], R15 ; /* 0x0000000f0a007986 */
/* 0x000fe8000c10190a */
/*10f0*/ LDG.E R0, [R8.64] ; /* 0x0000000a08007981 */
/* 0x000ea4000c1e1900 */
/*1100*/ FADD R7, R0, R7 ; /* 0x0000000700077221 */
/* 0x004fca0000000000 */
/*1110*/ STG.E [R8.64], R7 ; /* 0x0000000708007986 */
/* 0x000fe2000c10190a */
/*1120*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*1130*/ BRA 0x1130; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*1140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*11a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*11b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*11c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*11d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*11e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*11f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
/*
* CUDA accelerated coulombic potential grid test code
* John E. Stone and Chris Rodrigues
* http://www.ks.uiuc.edu/~johns/
*/
#include <stdio.h>
#include <stdlib.h>
#define CUERR { cudaError_t err; \
if ((err = cudaGetLastError()) != cudaSuccess) { \
printf("CUDA error: %s, line %d\n", cudaGetErrorString(err), __LINE__); \
return -1; }}
// max constant buffer size is 64KB, minus whatever
// the CUDA runtime and compiler are using that we don't know about
// At 16 bytes for atom, for this program 4070 atoms is about the max
// we can store in the constant buffer.
#define MAXATOMS 4000
__constant__ float4 atominfo[MAXATOMS];
#define UNROLLX 8
#define UNROLLY 1
#define BLOCKSIZEX 16
#define BLOCKSIZEY 16
#define BLOCKSIZE BLOCKSIZEX * BLOCKSIZEY
// This kernel calculates coulombic potential at each grid point and
// stores the results in the output array.
//
// This kernel was written by Chris Rodrigues of Wen-mei's group
//
// Coulombic potential kernel: accumulates the contribution of every atom in
// the 'atominfo' constant buffer into a slice of the energy grid.
//
// Launch layout: each thread owns UNROLLX grid points along x, spaced
// BLOCKSIZEX apart (thread blocks are BLOCKSIZEX x BLOCKSIZEY).  atominfo
// must already hold (x, y, (zplane-z)^2, q) per atom — see
// copyatomstoconstbuf.  Written by Chris Rodrigues of Wen-mei's group;
// restructured with unrolled loops, same arithmetic order.
__global__ void cenergy(int numatoms, float gridspacing, float * energygrid) {
  unsigned int xindex = __umul24(blockIdx.x, blockDim.x) * UNROLLX
                        + threadIdx.x;
  unsigned int yindex = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
  unsigned int outaddr = (__umul24(gridDim.x, blockDim.x) * UNROLLX) * yindex
                         + xindex;

  float coory = gridspacing * yindex;
  float coorx = gridspacing * xindex;

  // Successive points handled by one thread are BLOCKSIZEX grid cells apart.
  float gridspacing_u = gridspacing * BLOCKSIZEX;

  // One running sum per owned grid point.
  float accum[UNROLLX];
#pragma unroll
  for (int k = 0; k < UNROLLX; k++)
    accum[k] = 0.0f;

  //
  // XXX 59/8 FLOPS per atom
  //
  for (int atomid = 0; atomid < numatoms; atomid++) {
    float dy = coory - atominfo[atomid].y;
    // atominfo.z already stores the squared z-distance to this plane.
    float dyz2 = (dy * dy) + atominfo[atomid].z;
    float atomq = atominfo[atomid].w;

    // Walk the UNROLLX x-offsets incrementally: dx_{k+1} = dx_k + spacing,
    // matching the original dx1..dx8 chain bit-for-bit.
    float dx = coorx - atominfo[atomid].x;
#pragma unroll
    for (int k = 0; k < UNROLLX; k++) {
      accum[k] += atomq * rsqrtf(dx * dx + dyz2);
      dx += gridspacing_u;
    }
  }

  // Scatter the per-point sums back into global memory.
#pragma unroll
  for (int k = 0; k < UNROLLX; k++)
    energygrid[outaddr + k * BLOCKSIZEX] += accum[k];
}
// Stage per-atom data for the given z-plane and copy it into the GPU
// constant buffer 'atominfo'.  Per-atom layout after staging is
// (x, y, (zplane - z)^2, charge): squaring the z-distance here saves work
// for every grid point in the cenergy kernel.
//   atoms  - host array of count*4 floats (x, y, z, q per atom)
//   count  - number of atoms; must not exceed MAXATOMS
//   zplane - z coordinate of the slice being computed
// Returns 0 on success, -1 on overflow or a CUDA error (CUERR reports it).
int copyatomstoconstbuf(float *atoms, int count, float zplane) {
  CUERR // clear out any error left pending by earlier CUDA calls

  // The constant buffer has fixed capacity; never overrun it.
  if (count > MAXATOMS) {
    printf("Atom count exceeds constant buffer storage capacity\n");
    return -1;
  }

  // Host-side staging area sized for the full buffer capacity.
  float staged[4*MAXATOMS];
  for (int idx = 0; idx < count * 4; idx += 4) {
    staged[idx    ] = atoms[idx    ];   // x
    staged[idx + 1] = atoms[idx + 1];   // y
    float dz = zplane - atoms[idx + 2];
    staged[idx + 2] = dz * dz;          // squared z-distance to the plane
    staged[idx + 3] = atoms[idx + 3];   // charge
  }

  // Only the 'count' atoms actually staged are copied to __constant__ memory.
  cudaMemcpyToSymbol(atominfo, staged, count * 4 * sizeof(float), 0);
  CUERR // surface any error raised by the copy itself

  return 0;
}
// Allocate an atom array and fill it with uniformly distributed random
// positions inside the simulation volume plus a random charge in [-1, 1].
//   atombuf     - out: receives the malloc'd array of count*4 floats,
//                 laid out as x, y, z, q per atom (caller frees)
//   count       - number of atoms to generate
//   volsize     - volume dimensions in grid points
//   gridspacing - grid point spacing in angstroms
// Returns 0 on success, -1 if allocation fails.
// Uses rand(); seed with srand() beforehand for reproducible layouts.
int initatoms(float **atombuf, int count, dim3 volsize, float gridspacing) {
  dim3 size;
  int i;
  float *atoms;

  atoms = (float *) malloc(count * 4 * sizeof(float));
  if (atoms == NULL) {
    printf("Failed to allocate atom buffer\n");
    return -1;
  }
  *atombuf = atoms;

  // compute grid dimensions in angstroms
  // BUGFIX: cast the *product*, not gridspacing alone.  The original
  // '(unsigned int) gridspacing * volsize.x' truncated the float spacing
  // to an integer first, so any spacing < 1.0 collapsed the volume to zero
  // and placed every atom at the origin.
  size.x = (unsigned int) (gridspacing * volsize.x);
  size.y = (unsigned int) (gridspacing * volsize.y);
  size.z = (unsigned int) (gridspacing * volsize.z);

  for (i=0; i<count; i++) {
    int addr = i * 4;
    atoms[addr    ] = (rand() / (float) RAND_MAX) * size.x;
    atoms[addr + 1] = (rand() / (float) RAND_MAX) * size.y;
    atoms[addr + 2] = (rand() / (float) RAND_MAX) * size.z;
    atoms[addr + 3] = ((rand() / (float) RAND_MAX) * 2.0f) - 1.0f; // charge in [-1, 1]
  }

  return 0;
}
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "CUDA error: %s, line %d\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "Atom count exceeds constant buffer storage capacity\n"
.text
.globl _Z19copyatomstoconstbufPfif
.type _Z19copyatomstoconstbufPfif, @function
_Z19copyatomstoconstbufPfif:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
leaq -61440(%rsp), %r11
.cfi_def_cfa 11, 61464
.LPSRL0:
subq $4096, %rsp
orq $0, (%rsp)
cmpq %r11, %rsp
jne .LPSRL0
.cfi_def_cfa_register 7
subq $2600, %rsp
.cfi_def_cfa_offset 64064
movq %rdi, %rbx
movl %esi, %ebp
movss %xmm0, 12(%rsp)
movq %fs:40, %rax
movq %rax, 64024(%rsp)
xorl %eax, %eax
call cudaGetLastError@PLT
testl %eax, %eax
jne .L15
cmpl $4000, %ebp
jg .L6
leal 0(,%rbp,4), %edx
leaq 16(%rsp), %rcx
movl $0, %eax
testl %edx, %edx
jle .L8
.L9:
movss (%rbx,%rax,4), %xmm0
movss %xmm0, (%rcx)
movss 4(%rbx,%rax,4), %xmm0
movss %xmm0, 4(%rcx)
movss 12(%rsp), %xmm0
subss 8(%rbx,%rax,4), %xmm0
mulss %xmm0, %xmm0
movss %xmm0, 8(%rcx)
movss 12(%rbx,%rax,4), %xmm0
movss %xmm0, 12(%rcx)
addq $4, %rax
addq $16, %rcx
cmpl %eax, %edx
jg .L9
.L8:
movslq %edx, %rdx
salq $2, %rdx
leaq 16(%rsp), %rsi
movl $1, %r8d
movl $0, %ecx
leaq _ZL8atominfo(%rip), %rdi
call cudaMemcpyToSymbol@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L16
movl $0, %eax
.L3:
movq 64024(%rsp), %rdx
subq %fs:40, %rdx
jne .L17
addq $64040, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $111, %ecx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $-1, %eax
jmp .L3
.L6:
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $-1, %eax
jmp .L3
.L16:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $129, %ecx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $-1, %eax
jmp .L3
.L17:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z19copyatomstoconstbufPfif, .-_Z19copyatomstoconstbufPfif
.globl _Z9initatomsPPfi4dim3f
.type _Z9initatomsPPfi4dim3f, @function
_Z9initatomsPPfi4dim3f:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rdi, %rbp
movl %esi, %ebx
movss %xmm0, 12(%rsp)
movq %rdx, 16(%rsp)
movl %ecx, 24(%rsp)
leal 0(,%rsi,4), %r15d
movslq %r15d, %r15
salq $2, %r15
movq %r15, %rdi
call malloc@PLT
movq %rax, 0(%rbp)
cvttss2siq 12(%rsp), %r12
movl %r12d, %r13d
imull 16(%rsp), %r13d
movl %r12d, %r14d
imull 20(%rsp), %r14d
imull 24(%rsp), %r12d
testl %ebx, %ebx
jle .L19
movq %rax, %rbx
addq %rax, %r15
.L26:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC2(%rip), %xmm0
movl %r13d, %eax
pxor %xmm1, %xmm1
cvtsi2ssq %rax, %xmm1
mulss %xmm1, %xmm0
movss %xmm0, (%rbx)
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC2(%rip), %xmm0
movl %r14d, %eax
pxor %xmm1, %xmm1
cvtsi2ssq %rax, %xmm1
mulss %xmm1, %xmm0
movss %xmm0, 4(%rbx)
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC2(%rip), %xmm0
movl %r12d, %eax
pxor %xmm1, %xmm1
cvtsi2ssq %rax, %xmm1
mulss %xmm1, %xmm0
movss %xmm0, 8(%rbx)
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC2(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
addsd %xmm0, %xmm0
subsd .LC3(%rip), %xmm0
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 12(%rbx)
addq $16, %rbx
cmpq %r15, %rbx
jne .L26
.L19:
movl $0, %eax
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size _Z9initatomsPPfi4dim3f, .-_Z9initatomsPPfi4dim3f
.globl _Z28__device_stub__Z7cenergyifPfifPf
.type _Z28__device_stub__Z7cenergyifPfifPf, @function
_Z28__device_stub__Z7cenergyifPfifPf:
.LFB2083:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movss %xmm0, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L33
.L29:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L34
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L33:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z7cenergyifPf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L29
.L34:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z28__device_stub__Z7cenergyifPfifPf, .-_Z28__device_stub__Z7cenergyifPfifPf
.globl _Z7cenergyifPf
.type _Z7cenergyifPf, @function
_Z7cenergyifPf:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z28__device_stub__Z7cenergyifPfifPf
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z7cenergyifPf, .-_Z7cenergyifPf
.section .rodata.str1.1
.LC4:
.string "_Z7cenergyifPf"
.LC5:
.string "atominfo"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z7cenergyifPf(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $64000, %r9d
movl $0, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _ZL8atominfo(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL8atominfo
.comm _ZL8atominfo,64000,32
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC2:
.long 805306368
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC3:
.long 0
.long 1072693248
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
/*
* CUDA accelerated coulombic potential grid test code
* John E. Stone and Chris Rodrigues
* http://www.ks.uiuc.edu/~johns/
*/
#include <stdio.h>
#include <stdlib.h>
#define CUERR { cudaError_t err; \
if ((err = cudaGetLastError()) != cudaSuccess) { \
printf("CUDA error: %s, line %d\n", cudaGetErrorString(err), __LINE__); \
return -1; }}
// max constant buffer size is 64KB, minus whatever
// the CUDA runtime and compiler are using that we don't know about
// At 16 bytes for atom, for this program 4070 atoms is about the max
// we can store in the constant buffer.
#define MAXATOMS 4000
__constant__ float4 atominfo[MAXATOMS];
#define UNROLLX 8
#define UNROLLY 1
#define BLOCKSIZEX 16
#define BLOCKSIZEY 16
#define BLOCKSIZE BLOCKSIZEX * BLOCKSIZEY
// This kernel calculates coulombic potential at each grid point and
// stores the results in the output array.
//
// This kernel was written by Chris Rodrigues of Wen-mei's group
//
__global__ void cenergy(int numatoms, float gridspacing, float * energygrid) {
unsigned int xindex = __umul24(blockIdx.x, blockDim.x) * UNROLLX
+ threadIdx.x;
unsigned int yindex = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
unsigned int outaddr = (__umul24(gridDim.x, blockDim.x) * UNROLLX) * yindex
+ xindex;
float coory = gridspacing * yindex;
float coorx = gridspacing * xindex;
float energyvalx1=0.0f;
float energyvalx2=0.0f;
float energyvalx3=0.0f;
float energyvalx4=0.0f;
#if UNROLLX == 8
float energyvalx5=0.0f;
float energyvalx6=0.0f;
float energyvalx7=0.0f;
float energyvalx8=0.0f;
#endif
float gridspacing_u = gridspacing * BLOCKSIZEX;
//
// XXX 59/8 FLOPS per atom
//
int atomid;
for (atomid=0; atomid<numatoms; atomid++) {
float dy = coory - atominfo[atomid].y;
float dyz2 = (dy * dy) + atominfo[atomid].z;
float atomq=atominfo[atomid].w;
float dx1 = coorx - atominfo[atomid].x;
float dx2 = dx1 + gridspacing_u;
float dx3 = dx2 + gridspacing_u;
float dx4 = dx3 + gridspacing_u;
#if UNROLLX == 8
float dx5 = dx4 + gridspacing_u;
float dx6 = dx5 + gridspacing_u;
float dx7 = dx6 + gridspacing_u;
float dx8 = dx7 + gridspacing_u;
#endif
energyvalx1 += atomq * rsqrtf(dx1*dx1 + dyz2);
energyvalx2 += atomq * rsqrtf(dx2*dx2 + dyz2);
energyvalx3 += atomq * rsqrtf(dx3*dx3 + dyz2);
energyvalx4 += atomq * rsqrtf(dx4*dx4 + dyz2);
#if UNROLLX == 8
energyvalx5 += atomq * rsqrtf(dx5*dx5 + dyz2);
energyvalx6 += atomq * rsqrtf(dx6*dx6 + dyz2);
energyvalx7 += atomq * rsqrtf(dx7*dx7 + dyz2);
energyvalx8 += atomq * rsqrtf(dx8*dx8 + dyz2);
#endif
}
energygrid[outaddr ] += energyvalx1;
energygrid[outaddr+1*BLOCKSIZEX] += energyvalx2;
energygrid[outaddr+2*BLOCKSIZEX] += energyvalx3;
energygrid[outaddr+3*BLOCKSIZEX] += energyvalx4;
#if UNROLLX == 8
energygrid[outaddr+4*BLOCKSIZEX] += energyvalx5;
energygrid[outaddr+5*BLOCKSIZEX] += energyvalx6;
energygrid[outaddr+6*BLOCKSIZEX] += energyvalx7;
energygrid[outaddr+7*BLOCKSIZEX] += energyvalx8;
#endif
}
int copyatomstoconstbuf(float *atoms, int count, float zplane) {
CUERR // check and clear any existing errors
if (count > MAXATOMS) {
printf("Atom count exceeds constant buffer storage capacity\n");
return -1;
}
float atompre[4*MAXATOMS];
int i;
for (i=0; i<count*4; i+=4) {
atompre[i ] = atoms[i ];
atompre[i + 1] = atoms[i + 1];
float dz = zplane - atoms[i + 2];
atompre[i + 2] = dz*dz;
atompre[i + 3] = atoms[i + 3];
}
cudaMemcpyToSymbol(atominfo, atompre, count * 4 * sizeof(float), 0);
CUERR // check and clear any existing errors
return 0;
}
int initatoms(float **atombuf, int count, dim3 volsize, float gridspacing) {
dim3 size;
int i;
float *atoms;
atoms = (float *) malloc(count * 4 * sizeof(float));
*atombuf = atoms;
// compute grid dimensions in angstroms
size.x = (unsigned int) gridspacing * volsize.x;
size.y = (unsigned int) gridspacing * volsize.y;
size.z = (unsigned int) gridspacing * volsize.z;
for (i=0; i<count; i++) {
int addr = i * 4;
atoms[addr ] = (rand() / (float) RAND_MAX) * size.x;
atoms[addr + 1] = (rand() / (float) RAND_MAX) * size.y;
atoms[addr + 2] = (rand() / (float) RAND_MAX) * size.z;
atoms[addr + 3] = ((rand() / (float) RAND_MAX) * 2.0) - 1.0; // charge
}
return 0;
} | /***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
/*
* CUDA accelerated coulombic potential grid test code
* John E. Stone and Chris Rodrigues
* http://www.ks.uiuc.edu/~johns/
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define CUERR { hipError_t err; \
if ((err = hipGetLastError()) != hipSuccess) { \
printf("CUDA error: %s, line %d\n", hipGetErrorString(err), __LINE__); \
return -1; }}
// max constant buffer size is 64KB, minus whatever
// the CUDA runtime and compiler are using that we don't know about
// At 16 bytes for atom, for this program 4070 atoms is about the max
// we can store in the constant buffer.
#define MAXATOMS 4000
__constant__ float4 atominfo[MAXATOMS];
#define UNROLLX 8
#define UNROLLY 1
#define BLOCKSIZEX 16
#define BLOCKSIZEY 16
#define BLOCKSIZE BLOCKSIZEX * BLOCKSIZEY
// This kernel calculates coulombic potential at each grid point and
// stores the results in the output array.
//
// This kernel was written by Chris Rodrigues of Wen-mei's group
//
__global__ void cenergy(int numatoms, float gridspacing, float * energygrid) {
unsigned int xindex = __umul24(blockIdx.x, blockDim.x) * UNROLLX
+ threadIdx.x;
unsigned int yindex = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
unsigned int outaddr = (__umul24(gridDim.x, blockDim.x) * UNROLLX) * yindex
+ xindex;
float coory = gridspacing * yindex;
float coorx = gridspacing * xindex;
float energyvalx1=0.0f;
float energyvalx2=0.0f;
float energyvalx3=0.0f;
float energyvalx4=0.0f;
#if UNROLLX == 8
float energyvalx5=0.0f;
float energyvalx6=0.0f;
float energyvalx7=0.0f;
float energyvalx8=0.0f;
#endif
float gridspacing_u = gridspacing * BLOCKSIZEX;
//
// XXX 59/8 FLOPS per atom
//
int atomid;
for (atomid=0; atomid<numatoms; atomid++) {
float dy = coory - atominfo[atomid].y;
float dyz2 = (dy * dy) + atominfo[atomid].z;
float atomq=atominfo[atomid].w;
float dx1 = coorx - atominfo[atomid].x;
float dx2 = dx1 + gridspacing_u;
float dx3 = dx2 + gridspacing_u;
float dx4 = dx3 + gridspacing_u;
#if UNROLLX == 8
float dx5 = dx4 + gridspacing_u;
float dx6 = dx5 + gridspacing_u;
float dx7 = dx6 + gridspacing_u;
float dx8 = dx7 + gridspacing_u;
#endif
energyvalx1 += atomq * rsqrtf(dx1*dx1 + dyz2);
energyvalx2 += atomq * rsqrtf(dx2*dx2 + dyz2);
energyvalx3 += atomq * rsqrtf(dx3*dx3 + dyz2);
energyvalx4 += atomq * rsqrtf(dx4*dx4 + dyz2);
#if UNROLLX == 8
energyvalx5 += atomq * rsqrtf(dx5*dx5 + dyz2);
energyvalx6 += atomq * rsqrtf(dx6*dx6 + dyz2);
energyvalx7 += atomq * rsqrtf(dx7*dx7 + dyz2);
energyvalx8 += atomq * rsqrtf(dx8*dx8 + dyz2);
#endif
}
energygrid[outaddr ] += energyvalx1;
energygrid[outaddr+1*BLOCKSIZEX] += energyvalx2;
energygrid[outaddr+2*BLOCKSIZEX] += energyvalx3;
energygrid[outaddr+3*BLOCKSIZEX] += energyvalx4;
#if UNROLLX == 8
energygrid[outaddr+4*BLOCKSIZEX] += energyvalx5;
energygrid[outaddr+5*BLOCKSIZEX] += energyvalx6;
energygrid[outaddr+6*BLOCKSIZEX] += energyvalx7;
energygrid[outaddr+7*BLOCKSIZEX] += energyvalx8;
#endif
}
int copyatomstoconstbuf(float *atoms, int count, float zplane) {
CUERR // check and clear any existing errors
if (count > MAXATOMS) {
printf("Atom count exceeds constant buffer storage capacity\n");
return -1;
}
float atompre[4*MAXATOMS];
int i;
for (i=0; i<count*4; i+=4) {
atompre[i ] = atoms[i ];
atompre[i + 1] = atoms[i + 1];
float dz = zplane - atoms[i + 2];
atompre[i + 2] = dz*dz;
atompre[i + 3] = atoms[i + 3];
}
hipMemcpyToSymbol(HIP_SYMBOL(atominfo), atompre, count * 4 * sizeof(float), 0);
CUERR // check and clear any existing errors
return 0;
}
int initatoms(float **atombuf, int count, dim3 volsize, float gridspacing) {
dim3 size;
int i;
float *atoms;
atoms = (float *) malloc(count * 4 * sizeof(float));
*atombuf = atoms;
// compute grid dimensions in angstroms
size.x = (unsigned int) gridspacing * volsize.x;
size.y = (unsigned int) gridspacing * volsize.y;
size.z = (unsigned int) gridspacing * volsize.z;
for (i=0; i<count; i++) {
int addr = i * 4;
atoms[addr ] = (rand() / (float) RAND_MAX) * size.x;
atoms[addr + 1] = (rand() / (float) RAND_MAX) * size.y;
atoms[addr + 2] = (rand() / (float) RAND_MAX) * size.z;
atoms[addr + 3] = ((rand() / (float) RAND_MAX) * 2.0) - 1.0; // charge
}
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
/*
* CUDA accelerated coulombic potential grid test code
* John E. Stone and Chris Rodrigues
* http://www.ks.uiuc.edu/~johns/
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define CUERR { hipError_t err; \
if ((err = hipGetLastError()) != hipSuccess) { \
printf("CUDA error: %s, line %d\n", hipGetErrorString(err), __LINE__); \
return -1; }}
// max constant buffer size is 64KB, minus whatever
// the CUDA runtime and compiler are using that we don't know about
// At 16 bytes for atom, for this program 4070 atoms is about the max
// we can store in the constant buffer.
#define MAXATOMS 4000
__constant__ float4 atominfo[MAXATOMS];
#define UNROLLX 8
#define UNROLLY 1
#define BLOCKSIZEX 16
#define BLOCKSIZEY 16
#define BLOCKSIZE BLOCKSIZEX * BLOCKSIZEY
// This kernel calculates coulombic potential at each grid point and
// stores the results in the output array.
//
// This kernel was written by Chris Rodrigues of Wen-mei's group
//
__global__ void cenergy(int numatoms, float gridspacing, float * energygrid) {
unsigned int xindex = __umul24(blockIdx.x, blockDim.x) * UNROLLX
+ threadIdx.x;
unsigned int yindex = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
unsigned int outaddr = (__umul24(gridDim.x, blockDim.x) * UNROLLX) * yindex
+ xindex;
float coory = gridspacing * yindex;
float coorx = gridspacing * xindex;
float energyvalx1=0.0f;
float energyvalx2=0.0f;
float energyvalx3=0.0f;
float energyvalx4=0.0f;
#if UNROLLX == 8
float energyvalx5=0.0f;
float energyvalx6=0.0f;
float energyvalx7=0.0f;
float energyvalx8=0.0f;
#endif
float gridspacing_u = gridspacing * BLOCKSIZEX;
//
// XXX 59/8 FLOPS per atom
//
int atomid;
for (atomid=0; atomid<numatoms; atomid++) {
float dy = coory - atominfo[atomid].y;
float dyz2 = (dy * dy) + atominfo[atomid].z;
float atomq=atominfo[atomid].w;
float dx1 = coorx - atominfo[atomid].x;
float dx2 = dx1 + gridspacing_u;
float dx3 = dx2 + gridspacing_u;
float dx4 = dx3 + gridspacing_u;
#if UNROLLX == 8
float dx5 = dx4 + gridspacing_u;
float dx6 = dx5 + gridspacing_u;
float dx7 = dx6 + gridspacing_u;
float dx8 = dx7 + gridspacing_u;
#endif
energyvalx1 += atomq * rsqrtf(dx1*dx1 + dyz2);
energyvalx2 += atomq * rsqrtf(dx2*dx2 + dyz2);
energyvalx3 += atomq * rsqrtf(dx3*dx3 + dyz2);
energyvalx4 += atomq * rsqrtf(dx4*dx4 + dyz2);
#if UNROLLX == 8
energyvalx5 += atomq * rsqrtf(dx5*dx5 + dyz2);
energyvalx6 += atomq * rsqrtf(dx6*dx6 + dyz2);
energyvalx7 += atomq * rsqrtf(dx7*dx7 + dyz2);
energyvalx8 += atomq * rsqrtf(dx8*dx8 + dyz2);
#endif
}
energygrid[outaddr ] += energyvalx1;
energygrid[outaddr+1*BLOCKSIZEX] += energyvalx2;
energygrid[outaddr+2*BLOCKSIZEX] += energyvalx3;
energygrid[outaddr+3*BLOCKSIZEX] += energyvalx4;
#if UNROLLX == 8
energygrid[outaddr+4*BLOCKSIZEX] += energyvalx5;
energygrid[outaddr+5*BLOCKSIZEX] += energyvalx6;
energygrid[outaddr+6*BLOCKSIZEX] += energyvalx7;
energygrid[outaddr+7*BLOCKSIZEX] += energyvalx8;
#endif
}
int copyatomstoconstbuf(float *atoms, int count, float zplane) {
CUERR // check and clear any existing errors
if (count > MAXATOMS) {
printf("Atom count exceeds constant buffer storage capacity\n");
return -1;
}
float atompre[4*MAXATOMS];
int i;
for (i=0; i<count*4; i+=4) {
atompre[i ] = atoms[i ];
atompre[i + 1] = atoms[i + 1];
float dz = zplane - atoms[i + 2];
atompre[i + 2] = dz*dz;
atompre[i + 3] = atoms[i + 3];
}
hipMemcpyToSymbol(HIP_SYMBOL(atominfo), atompre, count * 4 * sizeof(float), 0);
CUERR // check and clear any existing errors
return 0;
}
int initatoms(float **atombuf, int count, dim3 volsize, float gridspacing) {
dim3 size;
int i;
float *atoms;
atoms = (float *) malloc(count * 4 * sizeof(float));
*atombuf = atoms;
// compute grid dimensions in angstroms
size.x = (unsigned int) gridspacing * volsize.x;
size.y = (unsigned int) gridspacing * volsize.y;
size.z = (unsigned int) gridspacing * volsize.z;
for (i=0; i<count; i++) {
int addr = i * 4;
atoms[addr ] = (rand() / (float) RAND_MAX) * size.x;
atoms[addr + 1] = (rand() / (float) RAND_MAX) * size.y;
atoms[addr + 2] = (rand() / (float) RAND_MAX) * size.z;
atoms[addr + 3] = ((rand() / (float) RAND_MAX) * 2.0) - 1.0; // charge
}
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z7cenergyifPf
.globl _Z7cenergyifPf
.p2align 8
.type _Z7cenergyifPf,@function
_Z7cenergyifPf:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x1c
s_load_b32 s10, s[0:1], 0x0
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_lshl_b32 s3, s14, 3
s_and_b32 s4, s15, 0xffffff
s_and_b32 s3, s3, 0x7fffff8
s_waitcnt lgkmcnt(0)
s_and_b32 s7, s2, 0xffff
s_lshr_b32 s2, s2, 16
v_mad_u64_u32 v[0:1], null, s3, s7, v[2:3]
v_mad_u64_u32 v[1:2], null, s4, s2, v[3:4]
s_cmp_lt_i32 s10, 1
s_cbranch_scc1 .LBB0_3
s_load_b32 s2, s[0:1], 0x4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cvt_f32_u32_e32 v9, v1
v_cvt_f32_u32_e32 v11, v0
v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v3, 0
v_dual_mov_b32 v4, 0 :: v_dual_mov_b32 v5, 0
v_dual_mov_b32 v6, 0 :: v_dual_mov_b32 v7, 0
v_mov_b32_e32 v8, 0
s_getpc_b64 s[8:9]
s_add_u32 s8, s8, atominfo@rel32@lo+16
s_addc_u32 s9, s9, atominfo@rel32@hi+24
s_waitcnt lgkmcnt(0)
v_dual_mul_f32 v11, s2, v11 :: v_dual_mul_f32 v10, s2, v9
v_mov_b32_e32 v9, 0
v_mul_f32_e64 v12, 0x41800000, s2
.LBB0_2:
s_add_u32 s2, s8, -12
s_addc_u32 s3, s9, -1
s_add_u32 s4, s8, -8
s_addc_u32 s5, s9, -1
s_add_u32 s12, s8, -4
s_addc_u32 s13, s9, -1
s_clause 0x3
s_load_b32 s4, s[4:5], 0x0
s_load_b32 s5, s[12:13], 0x0
s_load_b32 s11, s[8:9], 0x0
s_load_b32 s2, s[2:3], 0x0
s_add_i32 s10, s10, -1
s_add_u32 s8, s8, 16
s_addc_u32 s9, s9, 0
s_cmp_lg_u32 s10, 0
s_waitcnt lgkmcnt(0)
v_dual_subrev_f32 v13, s4, v10 :: v_dual_subrev_f32 v14, s2, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_fma_f32 v13, v13, v13, s5
v_add_f32_e32 v15, v12, v14
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v14, v14, v14, v13
v_mul_f32_e32 v17, 0x4b800000, v14
v_cmp_gt_f32_e32 vcc_lo, 0x800000, v14
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_cndmask_b32_e32 v14, v14, v17, vcc_lo
v_add_f32_e32 v16, v12, v15
v_fma_f32 v15, v15, v15, v13
v_rsq_f32_e32 v14, v14
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_f32_e32 v18, v12, v16
v_cmp_gt_f32_e64 s2, 0x800000, v15
v_fma_f32 v16, v16, v16, v13
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
v_add_f32_e32 v17, v12, v18
v_mul_f32_e32 v19, 0x4b800000, v15
v_fma_f32 v18, v18, v18, v13
v_cmp_gt_f32_e64 s3, 0x800000, v16
s_delay_alu instid0(TRANS32_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_4)
v_mul_f32_e32 v22, 0x45800000, v14
v_add_f32_e32 v20, v12, v17
v_cndmask_b32_e64 v15, v15, v19, s2
v_mul_f32_e32 v19, 0x4b800000, v16
v_cmp_gt_f32_e64 s4, 0x800000, v18
v_dual_cndmask_b32 v14, v14, v22 :: v_dual_add_f32 v21, v12, v20
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
v_rsq_f32_e32 v15, v15
v_cndmask_b32_e64 v16, v16, v19, s3
v_mul_f32_e32 v19, 0x4b800000, v18
v_fma_f32 v20, v20, v20, v13
v_dual_add_f32 v23, v12, v21 :: v_dual_fmac_f32 v2, s11, v14
v_fma_f32 v21, v21, v21, v13
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(TRANS32_DEP_2)
v_cndmask_b32_e64 v18, v18, v19, s4
v_rsq_f32_e32 v16, v16
v_cmp_gt_f32_e64 s6, 0x800000, v20
v_mul_f32_e32 v24, 0x45800000, v15
v_fma_f32 v17, v17, v17, v13
v_dual_fmac_f32 v13, v23, v23 :: v_dual_mul_f32 v22, 0x4b800000, v21
v_cmp_gt_f32_e32 vcc_lo, 0x800000, v21
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_cndmask_b32_e64 v15, v15, v24, s2
v_mul_f32_e32 v19, 0x4b800000, v17
v_cmp_gt_f32_e64 s5, 0x800000, v17
v_mul_f32_e32 v24, 0x4b800000, v13
v_cmp_gt_f32_e64 s2, 0x800000, v13
v_rsq_f32_e32 v18, v18
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_cndmask_b32_e64 v17, v17, v19, s5
v_mul_f32_e32 v19, 0x4b800000, v20
v_cndmask_b32_e64 v13, v13, v24, s2
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_rsq_f32_e32 v17, v17
v_cndmask_b32_e64 v19, v20, v19, s6
v_dual_cndmask_b32 v20, v21, v22 :: v_dual_mul_f32 v23, 0x45800000, v16
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_rsq_f32_e32 v13, v13
v_rsq_f32_e32 v14, v19
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_rsq_f32_e32 v19, v20
v_cndmask_b32_e64 v16, v16, v23, s3
v_fmac_f32_e32 v3, s11, v15
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(TRANS32_DEP_3)
v_dual_mul_f32 v15, 0x45800000, v18 :: v_dual_fmac_f32 v4, s11, v16
v_mul_f32_e32 v22, 0x45800000, v13
v_mul_f32_e32 v16, 0x45800000, v17
s_waitcnt_depctr 0xfff
v_dual_mul_f32 v20, 0x45800000, v14 :: v_dual_mul_f32 v21, 0x45800000, v19
v_cndmask_b32_e64 v15, v18, v15, s4
v_cndmask_b32_e64 v13, v13, v22, s2
v_cndmask_b32_e64 v16, v17, v16, s5
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_cndmask_b32_e64 v14, v14, v20, s6
v_cndmask_b32_e32 v17, v19, v21, vcc_lo
v_dual_fmac_f32 v9, s11, v13 :: v_dual_fmac_f32 v6, s11, v16
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
v_dual_fmac_f32 v5, s11, v15 :: v_dual_fmac_f32 v8, s11, v17
v_fmac_f32_e32 v7, s11, v14
s_cbranch_scc1 .LBB0_2
s_branch .LBB0_4
.LBB0_3:
v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v3, 0
v_dual_mov_b32 v4, 0 :: v_dual_mov_b32 v5, 0
v_dual_mov_b32 v6, 0 :: v_dual_mov_b32 v7, 0
v_dual_mov_b32 v8, 0 :: v_dual_mov_b32 v9, 0
.LBB0_4:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x10
s_load_b64 s[0:1], s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_lshl_b32 s2, s2, 3
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s2, 0x7fffff8
s_mul_i32 s2, s2, s7
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[10:11], null, s2, v1, v[0:1]
v_dual_mov_b32 v11, 0 :: v_dual_add_nc_u32 v0, 16, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_dual_mov_b32 v1, v11 :: v_dual_add_nc_u32 v12, 32, v10
v_lshlrev_b64 v[16:17], 2, v[10:11]
v_dual_mov_b32 v13, v11 :: v_dual_add_nc_u32 v14, 48, v10
v_lshlrev_b64 v[0:1], 2, v[0:1]
v_dual_mov_b32 v15, v11 :: v_dual_add_nc_u32 v18, 64, v10
s_delay_alu instid0(VALU_DEP_3)
v_lshlrev_b64 v[12:13], 2, v[12:13]
v_add_co_u32 v16, vcc_lo, s0, v16
v_dual_mov_b32 v19, v11 :: v_dual_add_nc_u32 v20, 0x50, v10
v_add_co_ci_u32_e32 v17, vcc_lo, s1, v17, vcc_lo
v_lshlrev_b64 v[14:15], 2, v[14:15]
v_add_co_u32 v0, vcc_lo, s0, v0
v_dual_mov_b32 v21, v11 :: v_dual_add_nc_u32 v22, 0x60, v10
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v12, vcc_lo, s0, v12
v_lshlrev_b64 v[18:19], 2, v[18:19]
v_dual_mov_b32 v23, v11 :: v_dual_add_nc_u32 v10, 0x70, v10
v_add_co_ci_u32_e32 v13, vcc_lo, s1, v13, vcc_lo
v_add_co_u32 v14, vcc_lo, s0, v14
v_lshlrev_b64 v[20:21], 2, v[20:21]
v_add_co_ci_u32_e32 v15, vcc_lo, s1, v15, vcc_lo
v_add_co_u32 v18, vcc_lo, s0, v18
v_lshlrev_b64 v[22:23], 2, v[22:23]
v_add_co_ci_u32_e32 v19, vcc_lo, s1, v19, vcc_lo
v_add_co_u32 v20, vcc_lo, s0, v20
v_lshlrev_b64 v[10:11], 2, v[10:11]
v_add_co_ci_u32_e32 v21, vcc_lo, s1, v21, vcc_lo
v_add_co_u32 v22, vcc_lo, s0, v22
v_add_co_ci_u32_e32 v23, vcc_lo, s1, v23, vcc_lo
s_delay_alu instid0(VALU_DEP_4)
v_add_co_u32 v10, vcc_lo, s0, v10
v_add_co_ci_u32_e32 v11, vcc_lo, s1, v11, vcc_lo
s_clause 0x7
global_load_b32 v24, v[16:17], off
global_load_b32 v25, v[0:1], off
global_load_b32 v26, v[12:13], off
global_load_b32 v27, v[14:15], off
global_load_b32 v28, v[18:19], off
global_load_b32 v29, v[20:21], off
global_load_b32 v30, v[22:23], off
global_load_b32 v31, v[10:11], off
s_waitcnt vmcnt(6)
v_dual_add_f32 v2, v2, v24 :: v_dual_add_f32 v3, v3, v25
s_waitcnt vmcnt(4)
v_dual_add_f32 v4, v4, v26 :: v_dual_add_f32 v5, v5, v27
s_waitcnt vmcnt(2)
v_dual_add_f32 v6, v6, v28 :: v_dual_add_f32 v7, v7, v29
s_waitcnt vmcnt(0)
v_dual_add_f32 v8, v8, v30 :: v_dual_add_f32 v9, v9, v31
s_clause 0x7
global_store_b32 v[16:17], v2, off
global_store_b32 v[0:1], v3, off
global_store_b32 v[12:13], v4, off
global_store_b32 v[14:15], v5, off
global_store_b32 v[18:19], v6, off
global_store_b32 v[20:21], v7, off
global_store_b32 v[22:23], v8, off
global_store_b32 v[10:11], v9, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z7cenergyifPf
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 32
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z7cenergyifPf, .Lfunc_end0-_Z7cenergyifPf
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.protected atominfo
.type atominfo,@object
.section .bss,"aw",@nobits
.globl atominfo
.p2align 4, 0x0
atominfo:
.zero 64000
.size atominfo, 64000
.type __hip_cuid_,@object
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym atominfo
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z7cenergyifPf
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z7cenergyifPf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 32
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
/*
* CUDA accelerated coulombic potential grid test code
* John E. Stone and Chris Rodrigues
* http://www.ks.uiuc.edu/~johns/
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define CUERR { hipError_t err; \
if ((err = hipGetLastError()) != hipSuccess) { \
printf("CUDA error: %s, line %d\n", hipGetErrorString(err), __LINE__); \
return -1; }}
// max constant buffer size is 64KB, minus whatever
// the CUDA runtime and compiler are using that we don't know about
// At 16 bytes for atom, for this program 4070 atoms is about the max
// we can store in the constant buffer.
#define MAXATOMS 4000
__constant__ float4 atominfo[MAXATOMS];
#define UNROLLX 8
#define UNROLLY 1
#define BLOCKSIZEX 16
#define BLOCKSIZEY 16
#define BLOCKSIZE BLOCKSIZEX * BLOCKSIZEY
// This kernel calculates coulombic potential at each grid point and
// stores the results in the output array.
//
// This kernel was written by Chris Rodrigues of Wen-mei's group
//
// Accumulate the coulombic potential of all atoms in the __constant__
// atominfo[] array into energygrid.  Each thread computes UNROLLX grid
// points on one y row, spaced BLOCKSIZEX apart in x.
// Precondition: atominfo[i].z already holds (zplane - z)^2 — see
// copyatomstoconstbuf — so only dy*dy needs to be added per row.
__global__ void cenergy(int numatoms, float gridspacing, float * energygrid) {
// __umul24 is a 24-bit multiply; valid while blockIdx*blockDim < 2^24.
unsigned int xindex = __umul24(blockIdx.x, blockDim.x) * UNROLLX
+ threadIdx.x;
unsigned int yindex = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
// Linear offset of this thread's first (leftmost) output grid point.
unsigned int outaddr = (__umul24(gridDim.x, blockDim.x) * UNROLLX) * yindex
+ xindex;
// Physical coordinates of the first sample point handled by this thread.
float coory = gridspacing * yindex;
float coorx = gridspacing * xindex;
// One independent accumulator per unrolled x position.
float energyvalx1=0.0f;
float energyvalx2=0.0f;
float energyvalx3=0.0f;
float energyvalx4=0.0f;
#if UNROLLX == 8
float energyvalx5=0.0f;
float energyvalx6=0.0f;
float energyvalx7=0.0f;
float energyvalx8=0.0f;
#endif
// Distance between this thread's successive x samples (one block width).
float gridspacing_u = gridspacing * BLOCKSIZEX;
//
// XXX 59/8 FLOPS per atom
//
int atomid;
for (atomid=0; atomid<numatoms; atomid++) {
// dy*dy + precomputed dz^2 is shared by all UNROLLX sample points.
float dy = coory - atominfo[atomid].y;
float dyz2 = (dy * dy) + atominfo[atomid].z;
float atomq=atominfo[atomid].w;
// x distances of the unrolled sample points, each gridspacing_u apart.
float dx1 = coorx - atominfo[atomid].x;
float dx2 = dx1 + gridspacing_u;
float dx3 = dx2 + gridspacing_u;
float dx4 = dx3 + gridspacing_u;
#if UNROLLX == 8
float dx5 = dx4 + gridspacing_u;
float dx6 = dx5 + gridspacing_u;
float dx7 = dx6 + gridspacing_u;
float dx8 = dx7 + gridspacing_u;
#endif
// potential contribution q/r via the fast reciprocal square root.
energyvalx1 += atomq * rsqrtf(dx1*dx1 + dyz2);
energyvalx2 += atomq * rsqrtf(dx2*dx2 + dyz2);
energyvalx3 += atomq * rsqrtf(dx3*dx3 + dyz2);
energyvalx4 += atomq * rsqrtf(dx4*dx4 + dyz2);
#if UNROLLX == 8
energyvalx5 += atomq * rsqrtf(dx5*dx5 + dyz2);
energyvalx6 += atomq * rsqrtf(dx6*dx6 + dyz2);
energyvalx7 += atomq * rsqrtf(dx7*dx7 + dyz2);
energyvalx8 += atomq * rsqrtf(dx8*dx8 + dyz2);
#endif
}
// Accumulate (+=) so the kernel can be invoked repeatedly, e.g. once per
// atom batch or per z-plane, on the same output grid.
energygrid[outaddr ] += energyvalx1;
energygrid[outaddr+1*BLOCKSIZEX] += energyvalx2;
energygrid[outaddr+2*BLOCKSIZEX] += energyvalx3;
energygrid[outaddr+3*BLOCKSIZEX] += energyvalx4;
#if UNROLLX == 8
energygrid[outaddr+4*BLOCKSIZEX] += energyvalx5;
energygrid[outaddr+5*BLOCKSIZEX] += energyvalx6;
energygrid[outaddr+6*BLOCKSIZEX] += energyvalx7;
energygrid[outaddr+7*BLOCKSIZEX] += energyvalx8;
#endif
}
// Stage `count` atoms (x, y, z, q quadruples) into the __constant__
// atominfo[] array on the device, replacing each atom's z with
// (zplane - z)^2 so the kernel only adds it to dy*dy per atom.
// Returns 0 on success, -1 on a CUDA/HIP error or if count > MAXATOMS.
int copyatomstoconstbuf(float *atoms, int count, float zplane) {
  CUERR // check and clear any error left over from earlier calls

  // Refuse atom sets larger than the constant-memory array.
  if (count > MAXATOMS) {
    printf("Atom count exceeds constant buffer storage capacity\n");
    return -1;
  }

  // Precompute the squared z distance to the target plane on the host.
  float staged[4*MAXATOMS];
  for (int idx = 0; idx < count * 4; idx += 4) {
    float dz = zplane - atoms[idx + 2];
    staged[idx    ] = atoms[idx];
    staged[idx + 1] = atoms[idx + 1];
    staged[idx + 2] = dz * dz;
    staged[idx + 3] = atoms[idx + 3];
  }

  hipMemcpyToSymbol(HIP_SYMBOL(atominfo), staged, count * 4 * sizeof(float), 0);
  CUERR // surface any failure of the symbol copy

  return 0;
}
// Allocate *atombuf and fill it with `count` atoms, 4 floats apiece:
// x/y/z position, uniform over the physical extent of the potential grid,
// and a charge uniform in [-1, 1].
// Returns 0 on success, -1 if the host allocation fails.
int initatoms(float **atombuf, int count, dim3 volsize, float gridspacing) {
  dim3 size;
  int i;
  float *atoms;

  atoms = (float *) malloc(count * 4 * sizeof(float));
  *atombuf = atoms;
  if (atoms == NULL)    // robustness: don't write through NULL below
    return -1;

  // compute grid dimensions in angstroms
  // BUG FIX: previously "(unsigned int) gridspacing * volsize.x" — the cast
  // binds tighter than '*', truncating the spacing to an integer BEFORE the
  // multiply (0 for any spacing < 1.0, collapsing every atom to the origin).
  // Cast the product instead.
  size.x = (unsigned int) (gridspacing * volsize.x);
  size.y = (unsigned int) (gridspacing * volsize.y);
  size.z = (unsigned int) (gridspacing * volsize.z);

  for (i=0; i<count; i++) {
    int addr = i * 4;
    atoms[addr    ] = (rand() / (float) RAND_MAX) * size.x;
    atoms[addr + 1] = (rand() / (float) RAND_MAX) * size.y;
    atoms[addr + 2] = (rand() / (float) RAND_MAX) * size.z;
    atoms[addr + 3] = ((rand() / (float) RAND_MAX) * 2.0) - 1.0; // charge in [-1, 1]
  }
  return 0;
}
.file "cuenergy.hip"
.globl _Z22__device_stub__cenergyifPf # -- Begin function _Z22__device_stub__cenergyifPf
.p2align 4, 0x90
.type _Z22__device_stub__cenergyifPf,@function
_Z22__device_stub__cenergyifPf: # @_Z22__device_stub__cenergyifPf
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movl %edi, 4(%rsp)
movss %xmm0, (%rsp)
movq %rsi, 56(%rsp)
leaq 4(%rsp), %rax
movq %rax, 64(%rsp)
movq %rsp, %rax
movq %rax, 72(%rsp)
leaq 56(%rsp), %rax
movq %rax, 80(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z7cenergyifPf, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z22__device_stub__cenergyifPf, .Lfunc_end0-_Z22__device_stub__cenergyifPf
.cfi_endproc
# -- End function
.globl _Z19copyatomstoconstbufPfif # -- Begin function _Z19copyatomstoconstbufPfif
.p2align 4, 0x90
.type _Z19copyatomstoconstbufPfif,@function
_Z19copyatomstoconstbufPfif: # @_Z19copyatomstoconstbufPfif
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $64016, %rsp # imm = 0xFA10
.cfi_def_cfa_offset 64048
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %rbp, -16
movss %xmm0, 12(%rsp) # 4-byte Spill
movl %esi, %r14d
movq %rdi, %rbx
callq hipGetLastError
testl %eax, %eax
je .LBB1_2
# %bb.1:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
movl $113, %edx
xorl %eax, %eax
callq printf
movl $-1, %ebx
jmp .LBB1_10
.LBB1_2: # %.critedge
movss 12(%rsp), %xmm1 # 4-byte Reload
# xmm1 = mem[0],zero,zero,zero
cmpl $4001, %r14d # imm = 0xFA1
jl .LBB1_4
# %bb.3:
movl $.Lstr, %edi
callq puts@PLT
movl $-1, %ebx
jmp .LBB1_10
.LBB1_4:
leal (,%r14,4), %eax
testl %r14d, %r14d
jle .LBB1_7
# %bb.5: # %.lr.ph.preheader
movl %eax, %ecx
xorl %edx, %edx
.p2align 4, 0x90
.LBB1_6: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movss (%rbx,%rdx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss %xmm0, 16(%rsp,%rdx,4)
movss 4(%rbx,%rdx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss %xmm0, 20(%rsp,%rdx,4)
movaps %xmm1, %xmm0
subss 8(%rbx,%rdx,4), %xmm0
mulss %xmm0, %xmm0
movss %xmm0, 24(%rsp,%rdx,4)
movss 12(%rbx,%rdx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss %xmm0, 28(%rsp,%rdx,4)
addq $4, %rdx
cmpq %rcx, %rdx
jb .LBB1_6
.LBB1_7: # %._crit_edge
movslq %eax, %rdx
shlq $2, %rdx
xorl %ebx, %ebx
leaq 16(%rsp), %rsi
movl $atominfo, %edi
xorl %ecx, %ecx
movl $1, %r8d
callq hipMemcpyToSymbol
callq hipGetLastError
movl %eax, %ebp
testl %eax, %eax
je .LBB1_9
# %bb.8:
movl %ebp, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
movl $131, %edx
xorl %eax, %eax
callq printf
.LBB1_9:
negl %ebp
sbbl %ebx, %ebx
.LBB1_10:
movl %ebx, %eax
addq $64016, %rsp # imm = 0xFA10
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z19copyatomstoconstbufPfif, .Lfunc_end1-_Z19copyatomstoconstbufPfif
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z9initatomsPPfi4dim3f
.LCPI2_0:
.long 0x30000000 # float 4.65661287E-10
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI2_1:
.quad 0xbff0000000000000 # double -1
.text
.globl _Z9initatomsPPfi4dim3f
.p2align 4, 0x90
.type _Z9initatomsPPfi4dim3f,@function
_Z9initatomsPPfi4dim3f: # @_Z9initatomsPPfi4dim3f
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $16, %rsp
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movss %xmm0, 4(%rsp) # 4-byte Spill
movl %ecx, %r14d
movq %rdx, %r12
movl %esi, %r15d
movq %rdi, %r13
leal (,%r15,4), %eax
movslq %eax, %rdi
shlq $2, %rdi
callq malloc
movq %rax, (%r13)
testl %r15d, %r15d
jle .LBB2_3
# %bb.1: # %.lr.ph
movq %rax, %rbx
cvttss2si 4(%rsp), %rax # 4-byte Folded Reload
imull %eax, %r14d
movq %r12, %rcx
shrq $32, %rcx
imull %eax, %r12d
xorps %xmm0, %xmm0
cvtsi2ss %r12, %xmm0
movss %xmm0, 4(%rsp) # 4-byte Spill
imull %eax, %ecx
xorps %xmm0, %xmm0
cvtsi2ss %rcx, %xmm0
movss %xmm0, 12(%rsp) # 4-byte Spill
xorps %xmm0, %xmm0
cvtsi2ss %r14, %xmm0
movss %xmm0, 8(%rsp) # 4-byte Spill
movl %r15d, %r14d
shlq $2, %r14
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB2_2: # =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss .LCPI2_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss %xmm1, %xmm0
mulss 4(%rsp), %xmm0 # 4-byte Folded Reload
movl %r15d, %eax
movss %xmm0, (%rbx,%rax,4)
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss .LCPI2_0(%rip), %xmm0
mulss 12(%rsp), %xmm0 # 4-byte Folded Reload
leal 1(%r15), %eax
movss %xmm0, (%rbx,%rax,4)
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss .LCPI2_0(%rip), %xmm0
mulss 8(%rsp), %xmm0 # 4-byte Folded Reload
leal 2(%r15), %eax
movss %xmm0, (%rbx,%rax,4)
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss .LCPI2_0(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
addsd %xmm0, %xmm0
addsd .LCPI2_1(%rip), %xmm0
cvtsd2ss %xmm0, %xmm0
leal 3(%r15), %eax
movss %xmm0, (%rbx,%rax,4)
addq $4, %r15
cmpq %r15, %r14
jne .LBB2_2
.LBB2_3: # %._crit_edge
xorl %eax, %eax
addq $16, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z9initatomsPPfi4dim3f, .Lfunc_end2-_Z9initatomsPPfi4dim3f
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7cenergyifPf, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $0, 8(%rsp)
movl $1, (%rsp)
movl $atominfo, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movl $64000, %r9d # imm = 0xFA00
movq %rbx, %rdi
xorl %r8d, %r8d
callq __hipRegisterVar
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type atominfo,@object # @atominfo
.local atominfo
.comm atominfo,64000,16
.type _Z7cenergyifPf,@object # @_Z7cenergyifPf
.section .rodata,"a",@progbits
.globl _Z7cenergyifPf
.p2align 3, 0x0
_Z7cenergyifPf:
.quad _Z22__device_stub__cenergyifPf
.size _Z7cenergyifPf, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "CUDA error: %s, line %d\n"
.size .L.str, 25
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z7cenergyifPf"
.size .L__unnamed_1, 15
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "atominfo"
.size .L__unnamed_2, 9
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Atom count exceeds constant buffer storage capacity"
.size .Lstr, 52
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__cenergyifPf
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym atominfo
.addrsig_sym _Z7cenergyifPf
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00025b6d_00000000-6_cuenergy.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "CUDA error: %s, line %d\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "Atom count exceeds constant buffer storage capacity\n"
.text
.globl _Z19copyatomstoconstbufPfif
.type _Z19copyatomstoconstbufPfif, @function
_Z19copyatomstoconstbufPfif:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
leaq -61440(%rsp), %r11
.cfi_def_cfa 11, 61464
.LPSRL0:
subq $4096, %rsp
orq $0, (%rsp)
cmpq %r11, %rsp
jne .LPSRL0
.cfi_def_cfa_register 7
subq $2600, %rsp
.cfi_def_cfa_offset 64064
movq %rdi, %rbx
movl %esi, %ebp
movss %xmm0, 12(%rsp)
movq %fs:40, %rax
movq %rax, 64024(%rsp)
xorl %eax, %eax
call cudaGetLastError@PLT
testl %eax, %eax
jne .L15
cmpl $4000, %ebp
jg .L6
leal 0(,%rbp,4), %edx
leaq 16(%rsp), %rcx
movl $0, %eax
testl %edx, %edx
jle .L8
.L9:
movss (%rbx,%rax,4), %xmm0
movss %xmm0, (%rcx)
movss 4(%rbx,%rax,4), %xmm0
movss %xmm0, 4(%rcx)
movss 12(%rsp), %xmm0
subss 8(%rbx,%rax,4), %xmm0
mulss %xmm0, %xmm0
movss %xmm0, 8(%rcx)
movss 12(%rbx,%rax,4), %xmm0
movss %xmm0, 12(%rcx)
addq $4, %rax
addq $16, %rcx
cmpl %eax, %edx
jg .L9
.L8:
movslq %edx, %rdx
salq $2, %rdx
leaq 16(%rsp), %rsi
movl $1, %r8d
movl $0, %ecx
leaq _ZL8atominfo(%rip), %rdi
call cudaMemcpyToSymbol@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L16
movl $0, %eax
.L3:
movq 64024(%rsp), %rdx
subq %fs:40, %rdx
jne .L17
addq $64040, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $111, %ecx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $-1, %eax
jmp .L3
.L6:
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $-1, %eax
jmp .L3
.L16:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $129, %ecx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $-1, %eax
jmp .L3
.L17:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z19copyatomstoconstbufPfif, .-_Z19copyatomstoconstbufPfif
.globl _Z9initatomsPPfi4dim3f
.type _Z9initatomsPPfi4dim3f, @function
_Z9initatomsPPfi4dim3f:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rdi, %rbp
movl %esi, %ebx
movss %xmm0, 12(%rsp)
movq %rdx, 16(%rsp)
movl %ecx, 24(%rsp)
leal 0(,%rsi,4), %r15d
movslq %r15d, %r15
salq $2, %r15
movq %r15, %rdi
call malloc@PLT
movq %rax, 0(%rbp)
cvttss2siq 12(%rsp), %r12
movl %r12d, %r13d
imull 16(%rsp), %r13d
movl %r12d, %r14d
imull 20(%rsp), %r14d
imull 24(%rsp), %r12d
testl %ebx, %ebx
jle .L19
movq %rax, %rbx
addq %rax, %r15
.L26:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC2(%rip), %xmm0
movl %r13d, %eax
pxor %xmm1, %xmm1
cvtsi2ssq %rax, %xmm1
mulss %xmm1, %xmm0
movss %xmm0, (%rbx)
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC2(%rip), %xmm0
movl %r14d, %eax
pxor %xmm1, %xmm1
cvtsi2ssq %rax, %xmm1
mulss %xmm1, %xmm0
movss %xmm0, 4(%rbx)
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC2(%rip), %xmm0
movl %r12d, %eax
pxor %xmm1, %xmm1
cvtsi2ssq %rax, %xmm1
mulss %xmm1, %xmm0
movss %xmm0, 8(%rbx)
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC2(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
addsd %xmm0, %xmm0
subsd .LC3(%rip), %xmm0
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 12(%rbx)
addq $16, %rbx
cmpq %r15, %rbx
jne .L26
.L19:
movl $0, %eax
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size _Z9initatomsPPfi4dim3f, .-_Z9initatomsPPfi4dim3f
.globl _Z28__device_stub__Z7cenergyifPfifPf
.type _Z28__device_stub__Z7cenergyifPfifPf, @function
_Z28__device_stub__Z7cenergyifPfifPf:
.LFB2083:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movss %xmm0, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L33
.L29:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L34
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L33:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z7cenergyifPf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L29
.L34:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z28__device_stub__Z7cenergyifPfifPf, .-_Z28__device_stub__Z7cenergyifPfifPf
.globl _Z7cenergyifPf
.type _Z7cenergyifPf, @function
_Z7cenergyifPf:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z28__device_stub__Z7cenergyifPfifPf
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z7cenergyifPf, .-_Z7cenergyifPf
.section .rodata.str1.1
.LC4:
.string "_Z7cenergyifPf"
.LC5:
.string "atominfo"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z7cenergyifPf(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $64000, %r9d
movl $0, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _ZL8atominfo(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL8atominfo
.comm _ZL8atominfo,64000,32
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC2:
.long 805306368
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC3:
.long 0
.long 1072693248
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cuenergy.hip"
.globl _Z22__device_stub__cenergyifPf # -- Begin function _Z22__device_stub__cenergyifPf
.p2align 4, 0x90
.type _Z22__device_stub__cenergyifPf,@function
_Z22__device_stub__cenergyifPf: # @_Z22__device_stub__cenergyifPf
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movl %edi, 4(%rsp)
movss %xmm0, (%rsp)
movq %rsi, 56(%rsp)
leaq 4(%rsp), %rax
movq %rax, 64(%rsp)
movq %rsp, %rax
movq %rax, 72(%rsp)
leaq 56(%rsp), %rax
movq %rax, 80(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z7cenergyifPf, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z22__device_stub__cenergyifPf, .Lfunc_end0-_Z22__device_stub__cenergyifPf
.cfi_endproc
# -- End function
.globl _Z19copyatomstoconstbufPfif # -- Begin function _Z19copyatomstoconstbufPfif
.p2align 4, 0x90
.type _Z19copyatomstoconstbufPfif,@function
_Z19copyatomstoconstbufPfif: # @_Z19copyatomstoconstbufPfif
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $64016, %rsp # imm = 0xFA10
.cfi_def_cfa_offset 64048
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %rbp, -16
movss %xmm0, 12(%rsp) # 4-byte Spill
movl %esi, %r14d
movq %rdi, %rbx
callq hipGetLastError
testl %eax, %eax
je .LBB1_2
# %bb.1:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
movl $113, %edx
xorl %eax, %eax
callq printf
movl $-1, %ebx
jmp .LBB1_10
.LBB1_2: # %.critedge
movss 12(%rsp), %xmm1 # 4-byte Reload
# xmm1 = mem[0],zero,zero,zero
cmpl $4001, %r14d # imm = 0xFA1
jl .LBB1_4
# %bb.3:
movl $.Lstr, %edi
callq puts@PLT
movl $-1, %ebx
jmp .LBB1_10
.LBB1_4:
leal (,%r14,4), %eax
testl %r14d, %r14d
jle .LBB1_7
# %bb.5: # %.lr.ph.preheader
movl %eax, %ecx
xorl %edx, %edx
.p2align 4, 0x90
.LBB1_6: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movss (%rbx,%rdx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss %xmm0, 16(%rsp,%rdx,4)
movss 4(%rbx,%rdx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss %xmm0, 20(%rsp,%rdx,4)
movaps %xmm1, %xmm0
subss 8(%rbx,%rdx,4), %xmm0
mulss %xmm0, %xmm0
movss %xmm0, 24(%rsp,%rdx,4)
movss 12(%rbx,%rdx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss %xmm0, 28(%rsp,%rdx,4)
addq $4, %rdx
cmpq %rcx, %rdx
jb .LBB1_6
.LBB1_7: # %._crit_edge
movslq %eax, %rdx
shlq $2, %rdx
xorl %ebx, %ebx
leaq 16(%rsp), %rsi
movl $atominfo, %edi
xorl %ecx, %ecx
movl $1, %r8d
callq hipMemcpyToSymbol
callq hipGetLastError
movl %eax, %ebp
testl %eax, %eax
je .LBB1_9
# %bb.8:
movl %ebp, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
movl $131, %edx
xorl %eax, %eax
callq printf
.LBB1_9:
negl %ebp
sbbl %ebx, %ebx
.LBB1_10:
movl %ebx, %eax
addq $64016, %rsp # imm = 0xFA10
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z19copyatomstoconstbufPfif, .Lfunc_end1-_Z19copyatomstoconstbufPfif
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z9initatomsPPfi4dim3f
.LCPI2_0:
.long 0x30000000 # float 4.65661287E-10
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI2_1:
.quad 0xbff0000000000000 # double -1
.text
.globl _Z9initatomsPPfi4dim3f
.p2align 4, 0x90
.type _Z9initatomsPPfi4dim3f,@function
_Z9initatomsPPfi4dim3f: # @_Z9initatomsPPfi4dim3f
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $16, %rsp
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movss %xmm0, 4(%rsp) # 4-byte Spill
movl %ecx, %r14d
movq %rdx, %r12
movl %esi, %r15d
movq %rdi, %r13
leal (,%r15,4), %eax
movslq %eax, %rdi
shlq $2, %rdi
callq malloc
movq %rax, (%r13)
testl %r15d, %r15d
jle .LBB2_3
# %bb.1: # %.lr.ph
movq %rax, %rbx
cvttss2si 4(%rsp), %rax # 4-byte Folded Reload
imull %eax, %r14d
movq %r12, %rcx
shrq $32, %rcx
imull %eax, %r12d
xorps %xmm0, %xmm0
cvtsi2ss %r12, %xmm0
movss %xmm0, 4(%rsp) # 4-byte Spill
imull %eax, %ecx
xorps %xmm0, %xmm0
cvtsi2ss %rcx, %xmm0
movss %xmm0, 12(%rsp) # 4-byte Spill
xorps %xmm0, %xmm0
cvtsi2ss %r14, %xmm0
movss %xmm0, 8(%rsp) # 4-byte Spill
movl %r15d, %r14d
shlq $2, %r14
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB2_2: # =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss .LCPI2_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss %xmm1, %xmm0
mulss 4(%rsp), %xmm0 # 4-byte Folded Reload
movl %r15d, %eax
movss %xmm0, (%rbx,%rax,4)
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss .LCPI2_0(%rip), %xmm0
mulss 12(%rsp), %xmm0 # 4-byte Folded Reload
leal 1(%r15), %eax
movss %xmm0, (%rbx,%rax,4)
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss .LCPI2_0(%rip), %xmm0
mulss 8(%rsp), %xmm0 # 4-byte Folded Reload
leal 2(%r15), %eax
movss %xmm0, (%rbx,%rax,4)
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss .LCPI2_0(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
addsd %xmm0, %xmm0
addsd .LCPI2_1(%rip), %xmm0
cvtsd2ss %xmm0, %xmm0
leal 3(%r15), %eax
movss %xmm0, (%rbx,%rax,4)
addq $4, %r15
cmpq %r15, %r14
jne .LBB2_2
.LBB2_3: # %._crit_edge
xorl %eax, %eax
addq $16, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z9initatomsPPfi4dim3f, .Lfunc_end2-_Z9initatomsPPfi4dim3f
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7cenergyifPf, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $0, 8(%rsp)
movl $1, (%rsp)
movl $atominfo, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movl $64000, %r9d # imm = 0xFA00
movq %rbx, %rdi
xorl %r8d, %r8d
callq __hipRegisterVar
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type atominfo,@object # @atominfo
.local atominfo
.comm atominfo,64000,16
.type _Z7cenergyifPf,@object # @_Z7cenergyifPf
.section .rodata,"a",@progbits
.globl _Z7cenergyifPf
.p2align 3, 0x0
_Z7cenergyifPf:
.quad _Z22__device_stub__cenergyifPf
.size _Z7cenergyifPf, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "CUDA error: %s, line %d\n"
.size .L.str, 25
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z7cenergyifPf"
.size .L__unnamed_1, 15
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "atominfo"
.size .L__unnamed_2, 9
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Atom count exceeds constant buffer storage capacity"
.size .Lstr, 52
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__cenergyifPf
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym atominfo
.addrsig_sym _Z7cenergyifPf
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <iostream>
#include <unistd.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define size 21 // Tamanho da matrix
// Exibe os pontos na tela
// Draw the interior of the board to stdout after a few blank lines:
// '#' marks a live cell, '_' a dead one.  The outermost border row and
// column on each side are not shown.
__host__ void print(bool grid[][size]){
std::cout << "\n\n\n\n\n";
for (unsigned int row = 1; row + 1 < size; ++row) {
for (unsigned int col = 1; col + 1 < size; ++col) {
std::cout << (grid[row][col] ? "#" : "_");
}
std::cout << std::endl;
}
}
// Return true iff at least one cell anywhere on the board (including the
// border cells) is alive.
__host__ bool someoneAlive(bool grid[][size]){
for (unsigned int r = 0; r < size; ++r) {
for (unsigned int c = 0; c < size; ++c) {
if (grid[r][c]) {
return true;
}
}
}
return false;
}
// Calcula a simulacao
// One Game-of-Life generation for cell (m, n), updated in place in `grid`.
// Launch layout: 2D grid/blocks covering at least size x size threads;
// out-of-range threads exit via the bounds guard.
__global__ void jogo(bool grid[][size]){
int m=blockIdx.x*blockDim.x+threadIdx.x;
int n=blockIdx.y*blockDim.y+threadIdx.y;
if (m<size && n<size){
// Snapshot the board so neighbour counts use pre-update state.
// NOTE(review): threads in other blocks may already be rewriting `grid`
// while this copy runs, so the snapshot is not race-free; a separate
// input/output buffer would be needed for a fully deterministic step —
// TODO confirm whether that matters for this demo.
bool grid_tmp[size][size] = {};
for(unsigned int i=0; i < size; i++){
for(unsigned int j=0; j < size; j++){
grid_tmp[i][j] = grid[i][j];
}
}
// Count live neighbours, treating cells outside the board as dead.
// BUG FIX: the original indexed grid_tmp[m+k][n+l] with no bounds check,
// reading out of bounds for border cells (e.g. m == 0 with k == -1, or
// m == size-1 with k == +1).
unsigned int count = 0;
for(int k = -1; k <= 1; k++){
for(int l = -1; l <= 1; l++){
if(k == 0 && l == 0)
continue; // the cell itself is not its own neighbour
int r = m + k;
int c = n + l;
if(r >= 0 && r < size && c >= 0 && c < size && grid_tmp[r][c])
++count;
}
}
// Conway's rules: die on under/over-population, birth on exactly three.
if(count < 2 || count > 3){
grid[m][n] = false;
}
else if(count == 3){
grid[m][n] = true;
}
}
return;
}
// Run Conway's Game of Life on the GPU, drawing each generation to the
// terminal until no cell remains alive.
int main(){
bool grid[size][size] = {}; // host-side board, seeded below
grid[ 5][ 7] = true;
grid[ 6][ 8] = true;
grid[ 8][ 8] = true;
grid[ 6][ 9] = true;
grid[ 8][10] = true;
grid[ 9][10] = true;
grid[ 8][11] = true;
grid[10][11] = true;
grid[10][12] = true;
int mem_size = size*size*sizeof(bool);
// BUG FIX: the original declared a host stack array `bool d_grid[size][size]`
// and passed it, cast to void**, straight to cudaMalloc — undefined behaviour
// and no device allocation at all.  Allocate a real device buffer and pass
// the ADDRESS of the pointer to cudaMalloc.
bool (*d_grid)[size] = NULL;
if (cudaMalloc((void **) &d_grid, mem_size) != cudaSuccess) {
std::cout << "cudaMalloc failed" << std::endl;
return 1;
}
int nthreads = 7;
dim3 blocks(size/nthreads+1,size/nthreads+1);
dim3 threads(nthreads,nthreads);
// BUG FIX: the original had "return 0;" inside this loop, so exactly one
// generation ever ran and cudaFree was unreachable.
while(someoneAlive(grid)){
cudaMemcpy(d_grid, grid, mem_size, cudaMemcpyHostToDevice);
jogo<<<blocks,threads>>>(d_grid);
cudaDeviceSynchronize();
cudaMemcpy(grid, d_grid, mem_size, cudaMemcpyDeviceToHost);
print(grid);
usleep(100000); // ~10 frames per second
}
cudaFree(d_grid);
return 0;
}
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
# Module-teardown helper registered via atexit by the CUDA registration
# constructor: unregisters the fatbinary handle held in
# _ZL20__cudaFatCubinHandle.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3687:
.cfi_startproc
endbr64
subq $8, %rsp                 # keep the stack 16-byte aligned for the call
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3687:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "#"
.LC1:
.string "_"
.LC2:
.string "\n\n\n\n\n"
.text
.globl _Z5printPA21_b
.type _Z5printPA21_b, @function
_Z5printPA21_b:
.LFB3682:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $8, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %r15
movl $5, %edx
leaq .LC2(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
leaq 41(%r15), %rbp
addq $440, %r15
leaq .LC1(%rip), %r14
leaq .LC0(%rip), %r13
leaq _ZSt4cout(%rip), %r12
jmp .L4
.L14:
call _ZSt16__throw_bad_castv@PLT
.L15:
movzbl 67(%rbx), %esi
.L9:
movsbl %sil, %esi
movq %r12, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
addq $21, %rbp
cmpq %r15, %rbp
je .L3
.L4:
leaq -19(%rbp), %rbx
.L6:
cmpb $0, (%rbx)
movq %r13, %rsi
cmove %r14, %rsi
movl $1, %edx
movq %r12, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $1, %rbx
cmpq %rbx, %rbp
jne .L6
movq (%r12), %rax
movq -24(%rax), %rax
movq 240(%r12,%rax), %rbx
testq %rbx, %rbx
je .L14
cmpb $0, 56(%rbx)
jne .L15
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L9
.L3:
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3682:
.size _Z5printPA21_b, .-_Z5printPA21_b
.globl _Z12someoneAlivePA21_b
.type _Z12someoneAlivePA21_b, @function
_Z12someoneAlivePA21_b:
.LFB3683:
.cfi_startproc
endbr64
leaq 21(%rdi), %rcx
addq $462, %rdi
.L17:
leaq -21(%rcx), %rax
.L19:
movzbl (%rax), %edx
testb %dl, %dl
jne .L16
addq $1, %rax
cmpq %rcx, %rax
jne .L19
addq $21, %rcx
cmpq %rdi, %rcx
jne .L17
.L16:
movl %edx, %eax
ret
.cfi_endproc
.LFE3683:
.size _Z12someoneAlivePA21_b, .-_Z12someoneAlivePA21_b
.globl _Z27__device_stub__Z4jogoPA21_bPA21_b
.type _Z27__device_stub__Z4jogoPA21_bPA21_b, @function
_Z27__device_stub__Z4jogoPA21_bPA21_b:
.LFB3709:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L25
.L21:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L26
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z4jogoPA21_b(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L21
.L26:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3709:
.size _Z27__device_stub__Z4jogoPA21_bPA21_b, .-_Z27__device_stub__Z4jogoPA21_bPA21_b
.globl _Z4jogoPA21_b
.type _Z4jogoPA21_b, @function
_Z4jogoPA21_b:
.LFB3710:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z4jogoPA21_bPA21_b
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3710:
.size _Z4jogoPA21_b, .-_Z4jogoPA21_b
.globl main
.type main, @function
main:
.LFB3684:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $944, %rsp
.cfi_def_cfa_offset 960
movq %fs:40, %rax
movq %rax, 936(%rsp)
xorl %eax, %eax
leaq 32(%rsp), %rdi
movl $55, %ecx
rep stosq
movb $0, (%rdi)
movb $1, 144(%rsp)
movb $1, 166(%rsp)
movb $1, 208(%rsp)
movb $1, 167(%rsp)
movb $1, 210(%rsp)
movb $1, 231(%rsp)
movb $1, 211(%rsp)
movb $1, 253(%rsp)
movb $1, 254(%rsp)
leaq 480(%rsp), %rdi
movl $441, %esi
call cudaMalloc@PLT
movl $4, 8(%rsp)
movl $4, 12(%rsp)
movl $1, 16(%rsp)
movl $7, 20(%rsp)
movl $7, 24(%rsp)
movl $1, 28(%rsp)
leaq 32(%rsp), %rdi
call _Z12someoneAlivePA21_b
testb %al, %al
je .L30
leaq 32(%rsp), %rsi
leaq 480(%rsp), %rdi
movl $1, %ecx
movl $441, %edx
call cudaMemcpy@PLT
movl 28(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movq 8(%rsp), %rdi
movl 16(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L35
.L31:
call cudaDeviceSynchronize@PLT
leaq 480(%rsp), %rsi
leaq 32(%rsp), %rbx
movl $2, %ecx
movl $441, %edx
movq %rbx, %rdi
call cudaMemcpy@PLT
movq %rbx, %rdi
call _Z5printPA21_b
movl $100000, %edi
call usleep@PLT
.L32:
movq 936(%rsp), %rax
subq %fs:40, %rax
jne .L36
movl $0, %eax
addq $944, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L35:
.cfi_restore_state
leaq 480(%rsp), %rdi
call _Z27__device_stub__Z4jogoPA21_bPA21_b
jmp .L31
.L30:
leaq 480(%rsp), %rdi
call cudaFree@PLT
jmp .L32
.L36:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3684:
.size main, .-main
.section .rodata.str1.1
.LC3:
.string "_Z4jogoPA21_b"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3712:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z4jogoPA21_b(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3712:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
#include <unistd.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define size 21 // Tamanho da matrix
// Exibe os pontos na tela
// Renders the board to stdout: '#' marks a live cell, '_' a dead one.
// Prints five blank lines first, then only the interior
// (rows/columns 1..size-2); the outer border is never shown
// (presumably it acts as a permanently dead frame -- confirm).
__host__ void print(bool grid[][size]){
    std::cout << "\n\n\n\n\n";
    for(unsigned int i = 1; i < size-1; i++) {
        for(unsigned int j = 1; j < size-1; j++)
            std::cout << (grid[i][j]?"#":"_");
        std::cout << std::endl;  // one output line per board row
    }
}
// Returns true as soon as any cell on the full size x size board is
// alive, false only when the board is completely empty. Used by main as
// the simulation-loop condition.
__host__ bool someoneAlive(bool grid[][size]){
    for(unsigned int i=0; i < size; i++)
        for(unsigned int j=0; j < size; j++)
            if(grid[i][j]==true) return true;
    return false;
}
// Calcula a simulacao
// Computes one Game-of-Life generation for cell (m, n).
// Expected launch: a 2D grid of 2D blocks covering at least size x size
// threads; surplus threads exit through the bounds check.
//
// NOTE(review): every thread snapshots the whole board into a private copy
// and then writes grid[m][n] in place, but there is no grid-wide barrier
// between those reads and writes, so threads in other blocks may observe a
// partially updated board. A correct version needs separate input/output
// buffers; left as-is to preserve the kernel's interface.
__global__ void jogo(bool grid[][size]){
    int m=blockIdx.x*blockDim.x+threadIdx.x;
    int n=blockIdx.y*blockDim.y+threadIdx.y;
    if (m<size && n<size){
        // Private snapshot of the board (one full copy per thread - slow,
        // but preserved from the original implementation).
        bool grid_tmp[size][size] = {};
        for(unsigned int i=0; i < size; i++){
            for(unsigned int j=0; j < size; j++){
                grid_tmp[i][j] = grid[i][j];
            }
        }
        // Count live neighbours among the up-to-8 surrounding cells.
        unsigned int count = 0;
        for(int k = -1; k <= 1; k++){
            for(int l = -1; l <= 1; l++){
                if(k != 0 || l != 0){
                    int mk = m + k;
                    int nl = n + l;
                    // BUG FIX: the original read grid_tmp[m+k][n+l] without
                    // bounds checks, indexing the local array at -1 and at
                    // size for edge cells (undefined behaviour). Neighbours
                    // outside the board now count as dead.
                    if(mk >= 0 && mk < size && nl >= 0 && nl < size &&
                       grid_tmp[mk][nl])
                        ++count;
                }
            }
        }
        // Conway rules: fewer than 2 or more than 3 neighbours kills the
        // cell; exactly 3 brings it to life; exactly 2 leaves it unchanged.
        if(count < 2 || count > 3){
            grid[m][n] = false;
        }
        else if(count == 3){
            grid[m][n] = true;
        }
    }
    return;
}
// Seeds a 21x21 board, then runs the Game-of-Life kernel, copying the
// board to and from the device around each launch and printing each frame.
int main(){
    // Initial live pattern; every other cell starts dead.
    bool grid[size][size] = {}; // dados iniciais
    grid[ 5][ 7] = true;
    grid[ 6][ 8] = true;
    grid[ 8][ 8] = true;
    grid[ 6][ 9] = true;
    grid[ 8][10] = true;
    grid[ 9][10] = true;
    grid[ 8][11] = true;
    grid[10][11] = true;
    grid[10][12] = true;
    // BUG FIX: the original declared `bool d_grid[size][size]` on the host
    // stack and called cudaMalloc((void **) d_grid, ...), which wrote the
    // device pointer into the array's first bytes and then passed host
    // stack memory to the kernel and to cudaMemcpy. Use a real device
    // pointer instead.
    bool (*d_grid)[size] = NULL;
    int mem_size = size*size*sizeof(bool);
    cudaMalloc((void **) &d_grid, mem_size);
    int nthreads = 7;
    // Enough 7x7 blocks to cover the 21x21 board (one spare block per axis).
    dim3 blocks(size/nthreads+1,size/nthreads+1);
    dim3 threads(nthreads,nthreads);
    while(someoneAlive(grid)){
        cudaMemcpy(d_grid, grid, mem_size, cudaMemcpyHostToDevice);
        jogo<<<blocks,threads>>>(d_grid);
        cudaDeviceSynchronize();
        cudaMemcpy(grid, d_grid, mem_size, cudaMemcpyDeviceToHost);
        print(grid);
        usleep(100000);
        // NOTE(review): this return exits after a single generation, so the
        // while loop never iterates. Kept to preserve observable behaviour,
        // but it is probably meant to be removed so the simulation runs
        // until the board is empty - confirm intent before changing.
        return 0;
    }
    cudaFree(d_grid);
} | #include <iostream>
#include <unistd.h>
#include <hip/hip_runtime.h>
#define size 21 // Tamanho da matrix
// Exibe os pontos na tela
// Renders the board to stdout: '#' marks a live cell, '_' a dead one.
// Prints five blank lines first, then only the interior
// (rows/columns 1..size-2); the outer border is never shown
// (presumably it acts as a permanently dead frame -- confirm).
__host__ void print(bool grid[][size]){
    std::cout << "\n\n\n\n\n";
    for(unsigned int i = 1; i < size-1; i++) {
        for(unsigned int j = 1; j < size-1; j++)
            std::cout << (grid[i][j]?"#":"_");
        std::cout << std::endl;  // one output line per board row
    }
}
// Returns true as soon as any cell on the full size x size board is
// alive, false only when the board is completely empty. Used by main as
// the simulation-loop condition.
__host__ bool someoneAlive(bool grid[][size]){
    for(unsigned int i=0; i < size; i++)
        for(unsigned int j=0; j < size; j++)
            if(grid[i][j]==true) return true;
    return false;
}
// Calcula a simulacao
// Computes one Game-of-Life generation for cell (m, n).
// Expected launch: a 2D grid of 2D blocks covering at least size x size
// threads; surplus threads exit through the bounds check.
//
// NOTE(review): every thread snapshots the whole board into a private copy
// and then writes grid[m][n] in place, but there is no grid-wide barrier
// between those reads and writes, so threads in other blocks may observe a
// partially updated board. A correct version needs separate input/output
// buffers; left as-is to preserve the kernel's interface.
__global__ void jogo(bool grid[][size]){
    int m=blockIdx.x*blockDim.x+threadIdx.x;
    int n=blockIdx.y*blockDim.y+threadIdx.y;
    if (m<size && n<size){
        // Private snapshot of the board (one full copy per thread - slow,
        // but preserved from the original implementation).
        bool grid_tmp[size][size] = {};
        for(unsigned int i=0; i < size; i++){
            for(unsigned int j=0; j < size; j++){
                grid_tmp[i][j] = grid[i][j];
            }
        }
        // Count live neighbours among the up-to-8 surrounding cells.
        unsigned int count = 0;
        for(int k = -1; k <= 1; k++){
            for(int l = -1; l <= 1; l++){
                if(k != 0 || l != 0){
                    int mk = m + k;
                    int nl = n + l;
                    // BUG FIX: the original read grid_tmp[m+k][n+l] without
                    // bounds checks, indexing the local array at -1 and at
                    // size for edge cells (undefined behaviour). Neighbours
                    // outside the board now count as dead.
                    if(mk >= 0 && mk < size && nl >= 0 && nl < size &&
                       grid_tmp[mk][nl])
                        ++count;
                }
            }
        }
        // Conway rules: fewer than 2 or more than 3 neighbours kills the
        // cell; exactly 3 brings it to life; exactly 2 leaves it unchanged.
        if(count < 2 || count > 3){
            grid[m][n] = false;
        }
        else if(count == 3){
            grid[m][n] = true;
        }
    }
    return;
}
// Seeds a 21x21 board, then runs the Game-of-Life kernel, copying the
// board to and from the device around each launch and printing each frame.
int main(){
    // Initial live pattern; every other cell starts dead.
    bool grid[size][size] = {}; // dados iniciais
    grid[ 5][ 7] = true;
    grid[ 6][ 8] = true;
    grid[ 8][ 8] = true;
    grid[ 6][ 9] = true;
    grid[ 8][10] = true;
    grid[ 9][10] = true;
    grid[ 8][11] = true;
    grid[10][11] = true;
    grid[10][12] = true;
    // BUG FIX: the original declared `bool d_grid[size][size]` on the host
    // stack and called hipMalloc((void **) d_grid, ...), which wrote the
    // device pointer into the array's first bytes and then passed host
    // stack memory to the kernel and to hipMemcpy. Use a real device
    // pointer instead.
    bool (*d_grid)[size] = NULL;
    int mem_size = size*size*sizeof(bool);
    hipMalloc((void **) &d_grid, mem_size);
    int nthreads = 7;
    // Enough 7x7 blocks to cover the 21x21 board (one spare block per axis).
    dim3 blocks(size/nthreads+1,size/nthreads+1);
    dim3 threads(nthreads,nthreads);
    while(someoneAlive(grid)){
        hipMemcpy(d_grid, grid, mem_size, hipMemcpyHostToDevice);
        jogo<<<blocks,threads>>>(d_grid);
        hipDeviceSynchronize();
        hipMemcpy(grid, d_grid, mem_size, hipMemcpyDeviceToHost);
        print(grid);
        usleep(100000);
        // NOTE(review): this return exits after a single generation, so the
        // while loop never iterates. Kept to preserve observable behaviour,
        // but it is probably meant to be removed so the simulation runs
        // until the board is empty - confirm intent before changing.
        return 0;
    }
    hipFree(d_grid);
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <iostream>
#include <unistd.h>
#include <hip/hip_runtime.h>
#define size 21 // Tamanho da matrix
// Exibe os pontos na tela
// Renders the board to stdout: '#' marks a live cell, '_' a dead one.
// Prints five blank lines first, then only the interior
// (rows/columns 1..size-2); the outer border is never shown
// (presumably it acts as a permanently dead frame -- confirm).
__host__ void print(bool grid[][size]){
    std::cout << "\n\n\n\n\n";
    for(unsigned int i = 1; i < size-1; i++) {
        for(unsigned int j = 1; j < size-1; j++)
            std::cout << (grid[i][j]?"#":"_");
        std::cout << std::endl;  // one output line per board row
    }
}
// Returns true as soon as any cell on the full size x size board is
// alive, false only when the board is completely empty. Used by main as
// the simulation-loop condition.
__host__ bool someoneAlive(bool grid[][size]){
    for(unsigned int i=0; i < size; i++)
        for(unsigned int j=0; j < size; j++)
            if(grid[i][j]==true) return true;
    return false;
}
// Calcula a simulacao
// Computes one Game-of-Life generation for cell (m, n).
// Expected launch: a 2D grid of 2D blocks covering at least size x size
// threads; surplus threads exit through the bounds check.
//
// NOTE(review): every thread snapshots the whole board into a private copy
// and then writes grid[m][n] in place, but there is no grid-wide barrier
// between those reads and writes, so threads in other blocks may observe a
// partially updated board. A correct version needs separate input/output
// buffers; left as-is to preserve the kernel's interface.
__global__ void jogo(bool grid[][size]){
    int m=blockIdx.x*blockDim.x+threadIdx.x;
    int n=blockIdx.y*blockDim.y+threadIdx.y;
    if (m<size && n<size){
        // Private snapshot of the board (one full copy per thread - slow,
        // but preserved from the original implementation).
        bool grid_tmp[size][size] = {};
        for(unsigned int i=0; i < size; i++){
            for(unsigned int j=0; j < size; j++){
                grid_tmp[i][j] = grid[i][j];
            }
        }
        // Count live neighbours among the up-to-8 surrounding cells.
        unsigned int count = 0;
        for(int k = -1; k <= 1; k++){
            for(int l = -1; l <= 1; l++){
                if(k != 0 || l != 0){
                    int mk = m + k;
                    int nl = n + l;
                    // BUG FIX: the original read grid_tmp[m+k][n+l] without
                    // bounds checks, indexing the local array at -1 and at
                    // size for edge cells (undefined behaviour). Neighbours
                    // outside the board now count as dead.
                    if(mk >= 0 && mk < size && nl >= 0 && nl < size &&
                       grid_tmp[mk][nl])
                        ++count;
                }
            }
        }
        // Conway rules: fewer than 2 or more than 3 neighbours kills the
        // cell; exactly 3 brings it to life; exactly 2 leaves it unchanged.
        if(count < 2 || count > 3){
            grid[m][n] = false;
        }
        else if(count == 3){
            grid[m][n] = true;
        }
    }
    return;
}
// Seeds a 21x21 board, then runs the Game-of-Life kernel, copying the
// board to and from the device around each launch and printing each frame.
int main(){
    // Initial live pattern; every other cell starts dead.
    bool grid[size][size] = {}; // dados iniciais
    grid[ 5][ 7] = true;
    grid[ 6][ 8] = true;
    grid[ 8][ 8] = true;
    grid[ 6][ 9] = true;
    grid[ 8][10] = true;
    grid[ 9][10] = true;
    grid[ 8][11] = true;
    grid[10][11] = true;
    grid[10][12] = true;
    // BUG FIX: the original declared `bool d_grid[size][size]` on the host
    // stack and called hipMalloc((void **) d_grid, ...), which wrote the
    // device pointer into the array's first bytes and then passed host
    // stack memory to the kernel and to hipMemcpy. Use a real device
    // pointer instead.
    bool (*d_grid)[size] = NULL;
    int mem_size = size*size*sizeof(bool);
    hipMalloc((void **) &d_grid, mem_size);
    int nthreads = 7;
    // Enough 7x7 blocks to cover the 21x21 board (one spare block per axis).
    dim3 blocks(size/nthreads+1,size/nthreads+1);
    dim3 threads(nthreads,nthreads);
    while(someoneAlive(grid)){
        hipMemcpy(d_grid, grid, mem_size, hipMemcpyHostToDevice);
        jogo<<<blocks,threads>>>(d_grid);
        hipDeviceSynchronize();
        hipMemcpy(grid, d_grid, mem_size, hipMemcpyDeviceToHost);
        print(grid);
        usleep(100000);
        // NOTE(review): this return exits after a single generation, so the
        // while loop never iterates. Kept to preserve observable behaviour,
        // but it is probably meant to be removed so the simulation runs
        // until the board is empty - confirm intent before changing.
        return 0;
    }
    hipFree(d_grid);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z4jogoPA21_b
.globl _Z4jogoPA21_b
.p2align 8
.type _Z4jogoPA21_b,@function
_Z4jogoPA21_b:
s_load_b32 s2, s[0:1], 0x14
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s8, s2, 0xffff
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[0:1], null, s14, s8, v[2:3]
s_mul_i32 s9, s15, s2
s_mov_b32 s2, exec_lo
v_add_nc_u32_e32 v1, s9, v3
v_max_i32_e32 v4, v0, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e32 21, v4
s_cbranch_execz .LBB0_15
s_load_b64 s[0:1], s[0:1], 0x0
s_mov_b32 s4, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
s_mov_b32 s5, s4
s_mov_b32 s6, s4
s_mov_b32 s7, s4
v_dual_mov_b32 v4, 0 :: v_dual_mov_b32 v9, s7
v_dual_mov_b32 v8, s6 :: v_dual_mov_b32 v7, s5
s_delay_alu instid0(VALU_DEP_2)
v_dual_mov_b32 v6, s4 :: v_dual_mov_b32 v5, v4
s_clause 0x18
scratch_store_b8 off, v4, off offset:456
scratch_store_b64 off, v[4:5], off offset:448
scratch_store_b128 off, v[6:9], off offset:432
scratch_store_b128 off, v[6:9], off offset:416
scratch_store_b128 off, v[6:9], off offset:400
scratch_store_b128 off, v[6:9], off offset:384
scratch_store_b128 off, v[6:9], off offset:368
scratch_store_b128 off, v[6:9], off offset:352
scratch_store_b128 off, v[6:9], off offset:336
scratch_store_b128 off, v[6:9], off offset:320
scratch_store_b128 off, v[6:9], off offset:304
scratch_store_b128 off, v[6:9], off offset:288
scratch_store_b128 off, v[6:9], off offset:272
scratch_store_b128 off, v[6:9], off offset:256
scratch_store_b128 off, v[6:9], off offset:240
scratch_store_b128 off, v[6:9], off offset:224
scratch_store_b128 off, v[6:9], off offset:208
scratch_store_b128 off, v[6:9], off offset:192
scratch_store_b128 off, v[6:9], off offset:176
scratch_store_b128 off, v[6:9], off offset:160
scratch_store_b128 off, v[6:9], off offset:144
scratch_store_b128 off, v[6:9], off offset:128
scratch_store_b128 off, v[6:9], off offset:112
scratch_store_b128 off, v[6:9], off offset:96
scratch_store_b128 off, v[6:9], off offset:80
v_dual_mov_b32 v4, 16 :: v_dual_mov_b32 v5, 0
s_clause 0x1
scratch_store_b128 off, v[6:9], off offset:64
scratch_store_b128 off, v[6:9], off offset:48
s_waitcnt lgkmcnt(0)
s_mov_b64 s[2:3], s[0:1]
s_clause 0x1
scratch_store_b128 off, v[6:9], off offset:32
scratch_store_b128 off, v[6:9], off offset:16
.p2align 6
.LBB0_2:
v_mov_b32_e32 v6, v4
s_mov_b64 s[6:7], 0
.LBB0_3:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s10, s2, s6
s_addc_u32 s11, s3, s7
s_add_u32 s6, s6, 1
global_load_u8 v7, v5, s[10:11]
s_addc_u32 s7, s7, 0
s_cmp_eq_u32 s6, 21
s_waitcnt vmcnt(0)
scratch_store_b8 v6, v7, off
v_add_nc_u32_e32 v6, 1, v6
s_cbranch_scc0 .LBB0_3
s_add_i32 s4, s4, 1
v_add_nc_u32_e32 v4, 21, v4
s_add_u32 s2, s2, 21
s_addc_u32 s3, s3, 0
s_cmp_lg_u32 s4, 21
s_cbranch_scc1 .LBB0_2
s_mul_i32 s2, s14, s8
v_mul_u32_u24_e32 v2, 21, v2
s_mul_i32 s2, s2, 21
v_mov_b32_e32 v4, 16
s_add_i32 s9, s9, s2
s_mov_b32 s2, -1
v_add3_u32 v2, s9, v3, v2
s_delay_alu instid0(VALU_DEP_1)
v_add3_u32 v3, v2, v4, 0xffffffea
v_mov_b32_e32 v2, 0
s_branch .LBB0_7
.p2align 6
.LBB0_6:
v_add_nc_u32_e32 v3, 21, v3
s_add_i32 s2, s2, 1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_eq_u32 s2, 2
s_cbranch_scc1 .LBB0_11
.LBB0_7:
s_mov_b32 s3, 0
s_branch .LBB0_9
.LBB0_8:
s_add_i32 s3, s3, 1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_eq_u32 s3, 3
s_cbranch_scc1 .LBB0_6
.LBB0_9:
s_add_i32 s4, s3, -1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_or_b32 s4, s4, s2
s_cmp_eq_u32 s4, 0
s_cbranch_scc1 .LBB0_8
v_add_nc_u32_e32 v4, s3, v3
scratch_load_u8 v4, v4, off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v2, v4
s_branch .LBB0_8
.LBB0_11:
v_and_b32_e32 v3, -2, v2
s_delay_alu instid0(VALU_DEP_1)
v_cmp_ne_u32_e64 s2, 2, v3
v_cmp_eq_u32_e32 vcc_lo, 2, v3
v_mov_b32_e32 v3, 0
s_and_saveexec_b32 s3, vcc_lo
v_cmp_eq_u32_e32 vcc_lo, 3, v2
v_mov_b32_e32 v3, 1
s_and_not1_b32 s2, s2, exec_lo
s_and_b32 s4, vcc_lo, exec_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 s2, s2, s4
s_or_b32 exec_lo, exec_lo, s3
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 exec_lo, exec_lo, s2
s_cbranch_execz .LBB0_15
v_mad_i64_i32 v[4:5], null, v0, 21, s[0:1]
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v4, v1
v_add_co_ci_u32_e32 v1, vcc_lo, v5, v2, vcc_lo
global_store_b8 v[0:1], v3, off
.LBB0_15:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4jogoPA21_b
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 464
.amdhsa_kernarg_size 264
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 1
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z4jogoPA21_b, .Lfunc_end0-_Z4jogoPA21_b
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: hidden_block_count_x
- .offset: 12
.size: 4
.value_kind: hidden_block_count_y
- .offset: 16
.size: 4
.value_kind: hidden_block_count_z
- .offset: 20
.size: 2
.value_kind: hidden_group_size_x
- .offset: 22
.size: 2
.value_kind: hidden_group_size_y
- .offset: 24
.size: 2
.value_kind: hidden_group_size_z
- .offset: 26
.size: 2
.value_kind: hidden_remainder_x
- .offset: 28
.size: 2
.value_kind: hidden_remainder_y
- .offset: 30
.size: 2
.value_kind: hidden_remainder_z
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 72
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 264
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4jogoPA21_b
.private_segment_fixed_size: 464
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z4jogoPA21_b.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <iostream>
#include <unistd.h>
#include <hip/hip_runtime.h>
#define size 21 // Tamanho da matrix
// Exibe os pontos na tela
// Renders the board to stdout: '#' marks a live cell, '_' a dead one.
// Prints five blank lines first, then only the interior
// (rows/columns 1..size-2); the outer border is never shown
// (presumably it acts as a permanently dead frame -- confirm).
__host__ void print(bool grid[][size]){
    std::cout << "\n\n\n\n\n";
    for(unsigned int i = 1; i < size-1; i++) {
        for(unsigned int j = 1; j < size-1; j++)
            std::cout << (grid[i][j]?"#":"_");
        std::cout << std::endl;  // one output line per board row
    }
}
// Returns true as soon as any cell on the full size x size board is
// alive, false only when the board is completely empty. Used by main as
// the simulation-loop condition.
__host__ bool someoneAlive(bool grid[][size]){
    for(unsigned int i=0; i < size; i++)
        for(unsigned int j=0; j < size; j++)
            if(grid[i][j]==true) return true;
    return false;
}
// Calcula a simulacao
// Computes one Game-of-Life generation for cell (m, n).
// Expected launch: a 2D grid of 2D blocks covering at least size x size
// threads; surplus threads exit through the bounds check.
//
// NOTE(review): every thread snapshots the whole board into a private copy
// and then writes grid[m][n] in place, but there is no grid-wide barrier
// between those reads and writes, so threads in other blocks may observe a
// partially updated board. A correct version needs separate input/output
// buffers; left as-is to preserve the kernel's interface.
__global__ void jogo(bool grid[][size]){
    int m=blockIdx.x*blockDim.x+threadIdx.x;
    int n=blockIdx.y*blockDim.y+threadIdx.y;
    if (m<size && n<size){
        // Private snapshot of the board (one full copy per thread - slow,
        // but preserved from the original implementation).
        bool grid_tmp[size][size] = {};
        for(unsigned int i=0; i < size; i++){
            for(unsigned int j=0; j < size; j++){
                grid_tmp[i][j] = grid[i][j];
            }
        }
        // Count live neighbours among the up-to-8 surrounding cells.
        unsigned int count = 0;
        for(int k = -1; k <= 1; k++){
            for(int l = -1; l <= 1; l++){
                if(k != 0 || l != 0){
                    int mk = m + k;
                    int nl = n + l;
                    // BUG FIX: the original read grid_tmp[m+k][n+l] without
                    // bounds checks, indexing the local array at -1 and at
                    // size for edge cells (undefined behaviour). Neighbours
                    // outside the board now count as dead.
                    if(mk >= 0 && mk < size && nl >= 0 && nl < size &&
                       grid_tmp[mk][nl])
                        ++count;
                }
            }
        }
        // Conway rules: fewer than 2 or more than 3 neighbours kills the
        // cell; exactly 3 brings it to life; exactly 2 leaves it unchanged.
        if(count < 2 || count > 3){
            grid[m][n] = false;
        }
        else if(count == 3){
            grid[m][n] = true;
        }
    }
    return;
}
// Seeds a 21x21 board, then runs the Game-of-Life kernel, copying the
// board to and from the device around each launch and printing each frame.
int main(){
    // Initial live pattern; every other cell starts dead.
    bool grid[size][size] = {}; // dados iniciais
    grid[ 5][ 7] = true;
    grid[ 6][ 8] = true;
    grid[ 8][ 8] = true;
    grid[ 6][ 9] = true;
    grid[ 8][10] = true;
    grid[ 9][10] = true;
    grid[ 8][11] = true;
    grid[10][11] = true;
    grid[10][12] = true;
    // BUG FIX: the original declared `bool d_grid[size][size]` on the host
    // stack and called hipMalloc((void **) d_grid, ...), which wrote the
    // device pointer into the array's first bytes and then passed host
    // stack memory to the kernel and to hipMemcpy. Use a real device
    // pointer instead.
    bool (*d_grid)[size] = NULL;
    int mem_size = size*size*sizeof(bool);
    hipMalloc((void **) &d_grid, mem_size);
    int nthreads = 7;
    // Enough 7x7 blocks to cover the 21x21 board (one spare block per axis).
    dim3 blocks(size/nthreads+1,size/nthreads+1);
    dim3 threads(nthreads,nthreads);
    while(someoneAlive(grid)){
        hipMemcpy(d_grid, grid, mem_size, hipMemcpyHostToDevice);
        jogo<<<blocks,threads>>>(d_grid);
        hipDeviceSynchronize();
        hipMemcpy(grid, d_grid, mem_size, hipMemcpyDeviceToHost);
        print(grid);
        usleep(100000);
        // NOTE(review): this return exits after a single generation, so the
        // while loop never iterates. Kept to preserve observable behaviour,
        // but it is probably meant to be removed so the simulation runs
        // until the board is empty - confirm intent before changing.
        return 0;
    }
    hipFree(d_grid);
} | .text
.file "q1try.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z5printPA21_b # -- Begin function _Z5printPA21_b
.p2align 4, 0x90
.type _Z5printPA21_b,@function
_Z5printPA21_b: # @_Z5printPA21_b
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $5, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
addq $22, %rbx
movl $1, %r15d
jmp .LBB0_1
.p2align 4, 0x90
.LBB0_8: # in Loop: Header=BB0_1 Depth=1
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB0_9: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
# in Loop: Header=BB0_1 Depth=1
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
incq %r15
addq $21, %rbx
cmpq $20, %r15
je .LBB0_10
.LBB0_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB0_2 Depth 2
xorl %r14d, %r14d
jmp .LBB0_2
.p2align 4, 0x90
.LBB0_4: # in Loop: Header=BB0_2 Depth=2
movl $_ZSt4cout, %edi
movl $1, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
incq %r14
cmpq $19, %r14
je .LBB0_5
.LBB0_2: # Parent Loop BB0_1 Depth=1
# => This Inner Loop Header: Depth=2
cmpb $0, (%rbx,%r14)
movl $.L.str.2, %esi
je .LBB0_4
# %bb.3: # in Loop: Header=BB0_2 Depth=2
movl $.L.str.1, %esi
jmp .LBB0_4
.p2align 4, 0x90
.LBB0_5: # in Loop: Header=BB0_1 Depth=1
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r14
testq %r14, %r14
je .LBB0_11
# %bb.6: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
# in Loop: Header=BB0_1 Depth=1
cmpb $0, 56(%r14)
je .LBB0_8
# %bb.7: # in Loop: Header=BB0_1 Depth=1
movzbl 67(%r14), %eax
jmp .LBB0_9
.LBB0_10:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB0_11:
.cfi_def_cfa_offset 32
callq _ZSt16__throw_bad_castv
.Lfunc_end0:
.size _Z5printPA21_b, .Lfunc_end0-_Z5printPA21_b
.cfi_endproc
# -- End function
.globl _Z12someoneAlivePA21_b # -- Begin function _Z12someoneAlivePA21_b
.p2align 4, 0x90
.type _Z12someoneAlivePA21_b,@function
_Z12someoneAlivePA21_b: # @_Z12someoneAlivePA21_b
.cfi_startproc
# %bb.0:
leaq 1(%rdi), %rcx
movb $1, %al
xorl %edx, %edx
jmp .LBB1_1
.p2align 4, 0x90
.LBB1_6: # %.critedge
# in Loop: Header=BB1_1 Depth=1
cmpq $20, %rdx
leaq 1(%rdx), %rsi
setb %al
addq $21, %rcx
movq %rsi, %rdx
cmpq $21, %rsi
je .LBB1_7
.LBB1_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_3 Depth 2
leaq (%rdx,%rdx,4), %rsi
leaq (%rdx,%rsi,4), %rsi
cmpb $0, (%rdi,%rsi)
jne .LBB1_7
# %bb.2: # %.lr.ph.preheader
# in Loop: Header=BB1_1 Depth=1
movq $-1, %rsi
.p2align 4, 0x90
.LBB1_3: # %.lr.ph
# Parent Loop BB1_1 Depth=1
# => This Inner Loop Header: Depth=2
cmpq $19, %rsi
je .LBB1_6
# %bb.4: # in Loop: Header=BB1_3 Depth=2
leaq 1(%rsi), %r8
cmpb $0, 1(%rcx,%rsi)
movq %r8, %rsi
je .LBB1_3
# %bb.5: # %._crit_edge
# in Loop: Header=BB1_1 Depth=1
cmpq $20, %r8
jae .LBB1_6
.LBB1_7: # %.critedge26
andb $1, %al
retq
.Lfunc_end1:
.size _Z12someoneAlivePA21_b, .Lfunc_end1-_Z12someoneAlivePA21_b
.cfi_endproc
# -- End function
.globl _Z19__device_stub__jogoPA21_b # -- Begin function _Z19__device_stub__jogoPA21_b
.p2align 4, 0x90
.type _Z19__device_stub__jogoPA21_b,@function
_Z19__device_stub__jogoPA21_b: # @_Z19__device_stub__jogoPA21_b
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z4jogoPA21_b, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end2:
.size _Z19__device_stub__jogoPA21_b, .Lfunc_end2-_Z19__device_stub__jogoPA21_b
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $960, %rsp # imm = 0x3C0
.cfi_def_cfa_offset 976
.cfi_offset %rbx, -16
leaq 64(%rsp), %rdi
movl $441, %edx # imm = 0x1B9
xorl %esi, %esi
callq memset@PLT
movb $1, 176(%rsp)
movw $257, 198(%rsp) # imm = 0x101
movb $1, 240(%rsp)
movw $257, 242(%rsp) # imm = 0x101
movb $1, 263(%rsp)
movw $257, 285(%rsp) # imm = 0x101
leaq 512(%rsp), %rdi
movl $441, %esi # imm = 0x1B9
callq hipMalloc
cmpb $0, 64(%rsp)
jne .LBB3_9
# %bb.1: # %.lr.ph.preheader.preheader
movb $1, %al
leaq 65(%rsp), %rcx
xorl %edx, %edx
.p2align 4, 0x90
.LBB3_3: # %.lr.ph.preheader
# =>This Loop Header: Depth=1
# Child Loop BB3_4 Depth 2
movq %rdx, %rsi
movq $-1, %rdx
.p2align 4, 0x90
.LBB3_4: # %.lr.ph
# Parent Loop BB3_3 Depth=1
# => This Inner Loop Header: Depth=2
cmpq $19, %rdx
je .LBB3_7
# %bb.5: # in Loop: Header=BB3_4 Depth=2
leaq 1(%rdx), %rdi
cmpb $0, 1(%rcx,%rdx)
movq %rdi, %rdx
je .LBB3_4
# %bb.6: # %._crit_edge
# in Loop: Header=BB3_3 Depth=1
cmpq $20, %rdi
jb .LBB3_8
.LBB3_7: # %.critedge.i
# in Loop: Header=BB3_3 Depth=1
leaq 1(%rsi), %rdx
cmpq $20, %rsi
setb %al
cmpq $21, %rdx
je .LBB3_8
# %bb.2: # %.preheader.i
# in Loop: Header=BB3_3 Depth=1
leaq (%rdx,%rdx,4), %rsi
leaq (%rdx,%rsi,4), %rsi
addq $21, %rcx
cmpb $0, 64(%rsp,%rsi)
je .LBB3_3
.LBB3_8: # %_Z12someoneAlivePA21_b.exit
testb $1, %al
je .LBB3_12
.LBB3_9: # %.critedge
leaq 512(%rsp), %rbx
leaq 64(%rsp), %rsi
movl $441, %edx # imm = 0x1B9
movq %rbx, %rdi
movl $1, %ecx
callq hipMemcpy
movabsq $17179869188, %rdi # imm = 0x400000004
movabsq $30064771079, %rdx # imm = 0x700000007
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_11
# %bb.10:
movq %rbx, 56(%rsp)
leaq 56(%rsp), %rax
movq %rax, (%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
movq %rsp, %r9
movl $_Z4jogoPA21_b, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_11:
callq hipDeviceSynchronize
leaq 64(%rsp), %rbx
leaq 512(%rsp), %rsi
movl $441, %edx # imm = 0x1B9
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movq %rbx, %rdi
callq _Z5printPA21_b
movl $100000, %edi # imm = 0x186A0
callq usleep
.LBB3_13:
xorl %eax, %eax
addq $960, %rsp # imm = 0x3C0
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.LBB3_12:
.cfi_def_cfa_offset 976
leaq 512(%rsp), %rdi
callq hipFree
jmp .LBB3_13
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4jogoPA21_b, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
# Module destructor registered via atexit by __hip_module_ctor: unregisters
# the HIP fat binary at most once (the handle is nulled after the call, and
# a null handle short-circuits to return).
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2                              # nothing registered -> just return
# %bb.1:
pushq %rax                              # re-align stack for the call
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)      # mark handle as released
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "\n\n\n\n\n"
.size .L.str, 6
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "#"
.size .L.str.1, 2
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "_"
.size .L.str.2, 2
.type _Z4jogoPA21_b,@object # @_Z4jogoPA21_b
.section .rodata,"a",@progbits
.globl _Z4jogoPA21_b
.p2align 3, 0x0
_Z4jogoPA21_b:
.quad _Z19__device_stub__jogoPA21_b
.size _Z4jogoPA21_b, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z4jogoPA21_b"
.size .L__unnamed_1, 14
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z19__device_stub__jogoPA21_b
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _ZSt4cout
.addrsig_sym _Z4jogoPA21_b
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0019ba00_00000000-6_q1try.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3687:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3687:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "#"
.LC1:
.string "_"
.LC2:
.string "\n\n\n\n\n"
.text
.globl _Z5printPA21_b
.type _Z5printPA21_b, @function
_Z5printPA21_b:
.LFB3682:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $8, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %r15
movl $5, %edx
leaq .LC2(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
leaq 41(%r15), %rbp
addq $440, %r15
leaq .LC1(%rip), %r14
leaq .LC0(%rip), %r13
leaq _ZSt4cout(%rip), %r12
jmp .L4
.L14:
call _ZSt16__throw_bad_castv@PLT
.L15:
movzbl 67(%rbx), %esi
.L9:
movsbl %sil, %esi
movq %r12, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
addq $21, %rbp
cmpq %r15, %rbp
je .L3
.L4:
leaq -19(%rbp), %rbx
.L6:
cmpb $0, (%rbx)
movq %r13, %rsi
cmove %r14, %rsi
movl $1, %edx
movq %r12, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $1, %rbx
cmpq %rbx, %rbp
jne .L6
movq (%r12), %rax
movq -24(%rax), %rax
movq 240(%r12,%rax), %rbx
testq %rbx, %rbx
je .L14
cmpb $0, 56(%rbx)
jne .L15
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L9
.L3:
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3682:
.size _Z5printPA21_b, .-_Z5printPA21_b
.globl _Z12someoneAlivePA21_b
.type _Z12someoneAlivePA21_b, @function
_Z12someoneAlivePA21_b:
.LFB3683:
.cfi_startproc
endbr64
leaq 21(%rdi), %rcx
addq $462, %rdi
.L17:
leaq -21(%rcx), %rax
.L19:
movzbl (%rax), %edx
testb %dl, %dl
jne .L16
addq $1, %rax
cmpq %rcx, %rax
jne .L19
addq $21, %rcx
cmpq %rdi, %rcx
jne .L17
.L16:
movl %edx, %eax
ret
.cfi_endproc
.LFE3683:
.size _Z12someoneAlivePA21_b, .-_Z12someoneAlivePA21_b
.globl _Z27__device_stub__Z4jogoPA21_bPA21_b
.type _Z27__device_stub__Z4jogoPA21_bPA21_b, @function
_Z27__device_stub__Z4jogoPA21_bPA21_b:
.LFB3709:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L25
.L21:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L26
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z4jogoPA21_b(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L21
.L26:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3709:
.size _Z27__device_stub__Z4jogoPA21_bPA21_b, .-_Z27__device_stub__Z4jogoPA21_bPA21_b
.globl _Z4jogoPA21_b
.type _Z4jogoPA21_b, @function
_Z4jogoPA21_b:
.LFB3710:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z4jogoPA21_bPA21_b
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3710:
.size _Z4jogoPA21_b, .-_Z4jogoPA21_b
.globl main
.type main, @function
main:
.LFB3684:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $944, %rsp
.cfi_def_cfa_offset 960
movq %fs:40, %rax
movq %rax, 936(%rsp)
xorl %eax, %eax
leaq 32(%rsp), %rdi
movl $55, %ecx
rep stosq
movb $0, (%rdi)
movb $1, 144(%rsp)
movb $1, 166(%rsp)
movb $1, 208(%rsp)
movb $1, 167(%rsp)
movb $1, 210(%rsp)
movb $1, 231(%rsp)
movb $1, 211(%rsp)
movb $1, 253(%rsp)
movb $1, 254(%rsp)
leaq 480(%rsp), %rdi
movl $441, %esi
call cudaMalloc@PLT
movl $4, 8(%rsp)
movl $4, 12(%rsp)
movl $1, 16(%rsp)
movl $7, 20(%rsp)
movl $7, 24(%rsp)
movl $1, 28(%rsp)
leaq 32(%rsp), %rdi
call _Z12someoneAlivePA21_b
testb %al, %al
je .L30
leaq 32(%rsp), %rsi
leaq 480(%rsp), %rdi
movl $1, %ecx
movl $441, %edx
call cudaMemcpy@PLT
movl 28(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movq 8(%rsp), %rdi
movl 16(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L35
.L31:
call cudaDeviceSynchronize@PLT
leaq 480(%rsp), %rsi
leaq 32(%rsp), %rbx
movl $2, %ecx
movl $441, %edx
movq %rbx, %rdi
call cudaMemcpy@PLT
movq %rbx, %rdi
call _Z5printPA21_b
movl $100000, %edi
call usleep@PLT
.L32:
movq 936(%rsp), %rax
subq %fs:40, %rax
jne .L36
movl $0, %eax
addq $944, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L35:
.cfi_restore_state
leaq 480(%rsp), %rdi
call _Z27__device_stub__Z4jogoPA21_bPA21_b
jmp .L31
.L30:
leaq 480(%rsp), %rdi
call cudaFree@PLT
jmp .L32
.L36:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3684:
.size main, .-main
.section .rodata.str1.1
.LC3:
.string "_Z4jogoPA21_b"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3712:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z4jogoPA21_b(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3712:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "q1try.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z5printPA21_b # -- Begin function _Z5printPA21_b
.p2align 4, 0x90
.type _Z5printPA21_b,@function
_Z5printPA21_b: # @_Z5printPA21_b
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $5, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
addq $22, %rbx
movl $1, %r15d
jmp .LBB0_1
.p2align 4, 0x90
.LBB0_8: # in Loop: Header=BB0_1 Depth=1
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB0_9: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
# in Loop: Header=BB0_1 Depth=1
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
incq %r15
addq $21, %rbx
cmpq $20, %r15
je .LBB0_10
.LBB0_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB0_2 Depth 2
xorl %r14d, %r14d
jmp .LBB0_2
.p2align 4, 0x90
.LBB0_4: # in Loop: Header=BB0_2 Depth=2
movl $_ZSt4cout, %edi
movl $1, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
incq %r14
cmpq $19, %r14
je .LBB0_5
.LBB0_2: # Parent Loop BB0_1 Depth=1
# => This Inner Loop Header: Depth=2
cmpb $0, (%rbx,%r14)
movl $.L.str.2, %esi
je .LBB0_4
# %bb.3: # in Loop: Header=BB0_2 Depth=2
movl $.L.str.1, %esi
jmp .LBB0_4
.p2align 4, 0x90
.LBB0_5: # in Loop: Header=BB0_1 Depth=1
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r14
testq %r14, %r14
je .LBB0_11
# %bb.6: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
# in Loop: Header=BB0_1 Depth=1
cmpb $0, 56(%r14)
je .LBB0_8
# %bb.7: # in Loop: Header=BB0_1 Depth=1
movzbl 67(%r14), %eax
jmp .LBB0_9
.LBB0_10:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB0_11:
.cfi_def_cfa_offset 32
callq _ZSt16__throw_bad_castv
.Lfunc_end0:
.size _Z5printPA21_b, .Lfunc_end0-_Z5printPA21_b
.cfi_endproc
# -- End function
.globl _Z12someoneAlivePA21_b # -- Begin function _Z12someoneAlivePA21_b
.p2align 4, 0x90
.type _Z12someoneAlivePA21_b,@function
_Z12someoneAlivePA21_b: # @_Z12someoneAlivePA21_b
.cfi_startproc
# %bb.0:
leaq 1(%rdi), %rcx
movb $1, %al
xorl %edx, %edx
jmp .LBB1_1
.p2align 4, 0x90
.LBB1_6: # %.critedge
# in Loop: Header=BB1_1 Depth=1
cmpq $20, %rdx
leaq 1(%rdx), %rsi
setb %al
addq $21, %rcx
movq %rsi, %rdx
cmpq $21, %rsi
je .LBB1_7
.LBB1_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_3 Depth 2
leaq (%rdx,%rdx,4), %rsi
leaq (%rdx,%rsi,4), %rsi
cmpb $0, (%rdi,%rsi)
jne .LBB1_7
# %bb.2: # %.lr.ph.preheader
# in Loop: Header=BB1_1 Depth=1
movq $-1, %rsi
.p2align 4, 0x90
.LBB1_3: # %.lr.ph
# Parent Loop BB1_1 Depth=1
# => This Inner Loop Header: Depth=2
cmpq $19, %rsi
je .LBB1_6
# %bb.4: # in Loop: Header=BB1_3 Depth=2
leaq 1(%rsi), %r8
cmpb $0, 1(%rcx,%rsi)
movq %r8, %rsi
je .LBB1_3
# %bb.5: # %._crit_edge
# in Loop: Header=BB1_1 Depth=1
cmpq $20, %r8
jae .LBB1_6
.LBB1_7: # %.critedge26
andb $1, %al
retq
.Lfunc_end1:
.size _Z12someoneAlivePA21_b, .Lfunc_end1-_Z12someoneAlivePA21_b
.cfi_endproc
# -- End function
.globl _Z19__device_stub__jogoPA21_b # -- Begin function _Z19__device_stub__jogoPA21_b
.p2align 4, 0x90
.type _Z19__device_stub__jogoPA21_b,@function
_Z19__device_stub__jogoPA21_b: # @_Z19__device_stub__jogoPA21_b
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z4jogoPA21_b, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end2:
.size _Z19__device_stub__jogoPA21_b, .Lfunc_end2-_Z19__device_stub__jogoPA21_b
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $960, %rsp # imm = 0x3C0
.cfi_def_cfa_offset 976
.cfi_offset %rbx, -16
leaq 64(%rsp), %rdi
movl $441, %edx # imm = 0x1B9
xorl %esi, %esi
callq memset@PLT
movb $1, 176(%rsp)
movw $257, 198(%rsp) # imm = 0x101
movb $1, 240(%rsp)
movw $257, 242(%rsp) # imm = 0x101
movb $1, 263(%rsp)
movw $257, 285(%rsp) # imm = 0x101
leaq 512(%rsp), %rdi
movl $441, %esi # imm = 0x1B9
callq hipMalloc
cmpb $0, 64(%rsp)
jne .LBB3_9
# %bb.1: # %.lr.ph.preheader.preheader
movb $1, %al
leaq 65(%rsp), %rcx
xorl %edx, %edx
.p2align 4, 0x90
.LBB3_3: # %.lr.ph.preheader
# =>This Loop Header: Depth=1
# Child Loop BB3_4 Depth 2
movq %rdx, %rsi
movq $-1, %rdx
.p2align 4, 0x90
.LBB3_4: # %.lr.ph
# Parent Loop BB3_3 Depth=1
# => This Inner Loop Header: Depth=2
cmpq $19, %rdx
je .LBB3_7
# %bb.5: # in Loop: Header=BB3_4 Depth=2
leaq 1(%rdx), %rdi
cmpb $0, 1(%rcx,%rdx)
movq %rdi, %rdx
je .LBB3_4
# %bb.6: # %._crit_edge
# in Loop: Header=BB3_3 Depth=1
cmpq $20, %rdi
jb .LBB3_8
.LBB3_7: # %.critedge.i
# in Loop: Header=BB3_3 Depth=1
leaq 1(%rsi), %rdx
cmpq $20, %rsi
setb %al
cmpq $21, %rdx
je .LBB3_8
# %bb.2: # %.preheader.i
# in Loop: Header=BB3_3 Depth=1
leaq (%rdx,%rdx,4), %rsi
leaq (%rdx,%rsi,4), %rsi
addq $21, %rcx
cmpb $0, 64(%rsp,%rsi)
je .LBB3_3
.LBB3_8: # %_Z12someoneAlivePA21_b.exit
testb $1, %al
je .LBB3_12
.LBB3_9: # %.critedge
leaq 512(%rsp), %rbx
leaq 64(%rsp), %rsi
movl $441, %edx # imm = 0x1B9
movq %rbx, %rdi
movl $1, %ecx
callq hipMemcpy
movabsq $17179869188, %rdi # imm = 0x400000004
movabsq $30064771079, %rdx # imm = 0x700000007
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_11
# %bb.10:
movq %rbx, 56(%rsp)
leaq 56(%rsp), %rax
movq %rax, (%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
movq %rsp, %r9
movl $_Z4jogoPA21_b, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_11:
callq hipDeviceSynchronize
leaq 64(%rsp), %rbx
leaq 512(%rsp), %rsi
movl $441, %edx # imm = 0x1B9
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movq %rbx, %rdi
callq _Z5printPA21_b
movl $100000, %edi # imm = 0x186A0
callq usleep
.LBB3_13:
xorl %eax, %eax
addq $960, %rsp # imm = 0x3C0
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.LBB3_12:
.cfi_def_cfa_offset 976
leaq 512(%rsp), %rdi
callq hipFree
jmp .LBB3_13
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4jogoPA21_b, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "\n\n\n\n\n"
.size .L.str, 6
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "#"
.size .L.str.1, 2
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "_"
.size .L.str.2, 2
.type _Z4jogoPA21_b,@object # @_Z4jogoPA21_b
.section .rodata,"a",@progbits
.globl _Z4jogoPA21_b
.p2align 3, 0x0
_Z4jogoPA21_b:
.quad _Z19__device_stub__jogoPA21_b
.size _Z4jogoPA21_b, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z4jogoPA21_b"
.size .L__unnamed_1, 14
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z19__device_stub__jogoPA21_b
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _ZSt4cout
.addrsig_sym _Z4jogoPA21_b
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void sum_grad(float* in, int inSize0, int inSize1, int inSize2, int inSize3, int nElement, float* out, int outStride0, int outStride1, int outStride2, int dim) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (int i = tid; i < nElement; i += stride) {
int inOff2 = i / inSize3;
int inDim3 = i - inOff2 * inSize3;
int inOff1 = inOff2 / inSize2;
int inDim2 = inOff2 - inOff1 * inSize2;
int inDim0 = inOff1 / inSize1;
int inDim1 = inOff1 - inDim0 * inSize1;
int outOff = 0;
if (dim == 0) outOff = inDim1 * outStride0 + inDim2 * outStride1 + inDim3 * outStride2;
if (dim == 1) outOff = inDim0 * outStride0 + inDim2 * outStride1 + inDim3 * outStride2;
if (dim == 2) outOff = inDim0 * outStride0 + inDim1 * outStride1 + inDim3 * outStride2;
if (dim == 3) outOff = inDim0 * outStride0 + inDim1 * outStride1 + inDim2 * outStride2;
in[i] += out[outOff];
}
} | .file "tmpxft_0015b477_00000000-6_sum_grad.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z38__device_stub__Z8sum_gradPfiiiiiS_iiiiPfiiiiiS_iiii
.type _Z38__device_stub__Z8sum_gradPfiiiiiS_iiiiPfiiiiiS_iiii, @function
_Z38__device_stub__Z8sum_gradPfiiiiiS_iiiiPfiiiiiS_iiii:
.LFB2051:
.cfi_startproc
endbr64
subq $216, %rsp
.cfi_def_cfa_offset 224
movq %rdi, 40(%rsp)
movl %esi, 36(%rsp)
movl %edx, 32(%rsp)
movl %ecx, 28(%rsp)
movl %r8d, 24(%rsp)
movl %r9d, 20(%rsp)
movq 224(%rsp), %rax
movq %rax, 8(%rsp)
movq %fs:40, %rax
movq %rax, 200(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 36(%rsp), %rax
movq %rax, 120(%rsp)
leaq 32(%rsp), %rax
movq %rax, 128(%rsp)
leaq 28(%rsp), %rax
movq %rax, 136(%rsp)
leaq 24(%rsp), %rax
movq %rax, 144(%rsp)
leaq 20(%rsp), %rax
movq %rax, 152(%rsp)
leaq 8(%rsp), %rax
movq %rax, 160(%rsp)
leaq 232(%rsp), %rax
movq %rax, 168(%rsp)
leaq 240(%rsp), %rax
movq %rax, 176(%rsp)
leaq 248(%rsp), %rax
movq %rax, 184(%rsp)
leaq 256(%rsp), %rax
movq %rax, 192(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 200(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $216, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 232
pushq 56(%rsp)
.cfi_def_cfa_offset 240
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z8sum_gradPfiiiiiS_iiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 224
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z38__device_stub__Z8sum_gradPfiiiiiS_iiiiPfiiiiiS_iiii, .-_Z38__device_stub__Z8sum_gradPfiiiiiS_iiiiPfiiiiiS_iiii
.globl _Z8sum_gradPfiiiiiS_iiii
.type _Z8sum_gradPfiiiiiS_iiii, @function
_Z8sum_gradPfiiiiiS_iiii:
.LFB2052:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 56(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
movl 56(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
movl 56(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 48
movl 56(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 56
pushq 56(%rsp)
.cfi_def_cfa_offset 64
call _Z38__device_stub__Z8sum_gradPfiiiiiS_iiiiPfiiiiiS_iiii
addq $56, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z8sum_gradPfiiiiiS_iiii, .-_Z8sum_gradPfiiiiiS_iiii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z8sum_gradPfiiiiiS_iiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z8sum_gradPfiiiiiS_iiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void sum_grad(float* in, int inSize0, int inSize1, int inSize2, int inSize3, int nElement, float* out, int outStride0, int outStride1, int outStride2, int dim) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (int i = tid; i < nElement; i += stride) {
int inOff2 = i / inSize3;
int inDim3 = i - inOff2 * inSize3;
int inOff1 = inOff2 / inSize2;
int inDim2 = inOff2 - inOff1 * inSize2;
int inDim0 = inOff1 / inSize1;
int inDim1 = inOff1 - inDim0 * inSize1;
int outOff = 0;
if (dim == 0) outOff = inDim1 * outStride0 + inDim2 * outStride1 + inDim3 * outStride2;
if (dim == 1) outOff = inDim0 * outStride0 + inDim2 * outStride1 + inDim3 * outStride2;
if (dim == 2) outOff = inDim0 * outStride0 + inDim1 * outStride1 + inDim3 * outStride2;
if (dim == 3) outOff = inDim0 * outStride0 + inDim1 * outStride1 + inDim2 * outStride2;
in[i] += out[outOff];
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void sum_grad(float* in, int inSize0, int inSize1, int inSize2, int inSize3, int nElement, float* out, int outStride0, int outStride1, int outStride2, int dim) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (int i = tid; i < nElement; i += stride) {
int inOff2 = i / inSize3;
int inDim3 = i - inOff2 * inSize3;
int inOff1 = inOff2 / inSize2;
int inDim2 = inOff2 - inOff1 * inSize2;
int inDim0 = inOff1 / inSize1;
int inDim1 = inOff1 - inDim0 * inSize1;
int outOff = 0;
if (dim == 0) outOff = inDim1 * outStride0 + inDim2 * outStride1 + inDim3 * outStride2;
if (dim == 1) outOff = inDim0 * outStride0 + inDim2 * outStride1 + inDim3 * outStride2;
if (dim == 2) outOff = inDim0 * outStride0 + inDim1 * outStride1 + inDim3 * outStride2;
if (dim == 3) outOff = inDim0 * outStride0 + inDim1 * outStride1 + inDim2 * outStride2;
in[i] += out[outOff];
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void sum_grad(float* in, int inSize0, int inSize1, int inSize2, int inSize3, int nElement, float* out, int outStride0, int outStride1, int outStride2, int dim) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (int i = tid; i < nElement; i += stride) {
int inOff2 = i / inSize3;
int inDim3 = i - inOff2 * inSize3;
int inOff1 = inOff2 / inSize2;
int inDim2 = inOff2 - inOff1 * inSize2;
int inDim0 = inOff1 / inSize1;
int inDim1 = inOff1 - inDim0 * inSize1;
int outOff = 0;
if (dim == 0) outOff = inDim1 * outStride0 + inDim2 * outStride1 + inDim3 * outStride2;
if (dim == 1) outOff = inDim0 * outStride0 + inDim2 * outStride1 + inDim3 * outStride2;
if (dim == 2) outOff = inDim0 * outStride0 + inDim1 * outStride1 + inDim3 * outStride2;
if (dim == 3) outOff = inDim0 * outStride0 + inDim1 * outStride1 + inDim2 * outStride2;
in[i] += out[outOff];
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z8sum_gradPfiiiiiS_iiii
.globl _Z8sum_gradPfiiiiiS_iiii
.p2align 8
.type _Z8sum_gradPfiiiiiS_iiii,@function
_Z8sum_gradPfiiiiiS_iiii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x44
s_load_b32 s14, s[0:1], 0x18
s_add_u32 s4, s0, 56
s_addc_u32 s5, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s10, s2, 0xffff
s_mov_b32 s2, exec_lo
v_mad_u64_u32 v[1:2], null, s15, s10, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s14, v1
s_cbranch_execz .LBB0_11
s_clause 0x2
s_load_b64 s[2:3], s[0:1], 0x30
s_load_b32 s24, s[0:1], 0x14
s_load_b64 s[8:9], s[0:1], 0xc
s_load_b32 s11, s[4:5], 0x0
s_load_b64 s[12:13], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_cmp_eq_u32 s3, 0
s_cselect_b32 s15, -1, 0
s_cmp_eq_u32 s3, 1
s_mul_i32 s10, s11, s10
s_cselect_b32 s16, -1, 0
s_cmp_eq_u32 s3, 2
s_cselect_b32 s17, -1, 0
s_cmp_eq_u32 s3, 3
s_cselect_b32 s3, -1, 0
s_ashr_i32 s18, s24, 31
s_ashr_i32 s19, s9, 31
s_ashr_i32 s20, s8, 31
s_add_i32 s4, s24, s18
s_add_i32 s5, s9, s19
s_add_i32 s6, s8, s20
s_xor_b32 s21, s4, s18
s_xor_b32 s22, s5, s19
s_xor_b32 s23, s6, s20
s_load_b128 s[4:7], s[0:1], 0x20
v_cvt_f32_u32_e32 v0, s21
v_cvt_f32_u32_e32 v4, s22
v_cvt_f32_u32_e32 v5, s23
v_add_co_u32 v2, vcc_lo, s12, v2
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
v_rcp_iflag_f32_e32 v0, v0
v_rcp_iflag_f32_e32 v4, v4
s_delay_alu instid0(VALU_DEP_2)
v_rcp_iflag_f32_e32 v5, v5
v_add_co_ci_u32_e32 v3, vcc_lo, s13, v3, vcc_lo
s_ashr_i32 s11, s10, 31
s_mov_b32 s1, 0
s_lshl_b64 s[12:13], s[10:11], 2
s_sub_i32 s11, 0, s24
s_delay_alu instid0(TRANS32_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_2)
v_mul_f32_e32 v0, 0x4f7ffffe, v0
s_waitcnt_depctr 0xfff
v_dual_mul_f32 v4, 0x4f7ffffe, v4 :: v_dual_mul_f32 v5, 0x4f7ffffe, v5
v_cvt_u32_f32_e32 v0, v0
v_cvt_u32_f32_e32 v6, v4
s_delay_alu instid0(VALU_DEP_3)
v_cvt_u32_f32_e32 v7, v5
s_branch .LBB0_3
.LBB0_2:
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s4, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v5, vcc_lo
global_load_b32 v8, v[2:3], off
global_load_b32 v4, v[4:5], off
s_waitcnt vmcnt(0)
v_dual_add_f32 v4, v4, v8 :: v_dual_add_nc_u32 v1, s10, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_cmp_le_i32_e32 vcc_lo, s14, v1
global_store_b32 v[2:3], v4, off
v_add_co_u32 v2, s0, v2, s12
v_add_co_ci_u32_e64 v3, s0, s13, v3, s0
s_or_b32 s1, vcc_lo, s1
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s1
s_cbranch_execz .LBB0_11
.LBB0_3:
s_sub_i32 s0, 0, s21
v_ashrrev_i32_e32 v5, 31, v1
v_mul_lo_u32 v4, s0, v0
s_sub_i32 s0, 0, s22
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v8, v1, v5
v_mul_hi_u32 v4, v0, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_xor_b32_e32 v8, v8, v5
v_xor_b32_e32 v5, s18, v5
v_add_nc_u32_e32 v4, v0, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v4, v8, v4
v_mul_lo_u32 v9, v4, s21
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v8, v8, v9
v_subrev_nc_u32_e32 v10, s21, v8
v_cmp_le_u32_e32 vcc_lo, s21, v8
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_cndmask_b32 v8, v8, v10 :: v_dual_add_nc_u32 v9, 1, v4
v_cndmask_b32_e32 v4, v4, v9, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_le_u32_e32 vcc_lo, s21, v8
v_add_nc_u32_e32 v9, 1, v4
v_mul_lo_u32 v8, s0, v6
s_sub_i32 s0, 0, s23
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v4, v4, v9, vcc_lo
v_xor_b32_e32 v4, v4, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
v_sub_nc_u32_e32 v10, v4, v5
v_mul_hi_u32 v5, v6, v8
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v4, 31, v10
v_add_nc_u32_e32 v5, v6, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v8, v10, v4
v_xor_b32_e32 v8, v8, v4
v_xor_b32_e32 v4, s19, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v5, v8, v5
v_mul_lo_u32 v9, v5, s22
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_sub_nc_u32_e32 v8, v8, v9
v_add_nc_u32_e32 v9, 1, v5
v_subrev_nc_u32_e32 v11, s22, v8
v_cmp_le_u32_e32 vcc_lo, s22, v8
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_cndmask_b32 v5, v5, v9 :: v_dual_cndmask_b32 v8, v8, v11
v_add_nc_u32_e32 v9, 1, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_cmp_le_u32_e32 vcc_lo, s22, v8
v_mul_lo_u32 v8, s0, v7
v_cndmask_b32_e32 v5, v5, v9, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_xor_b32_e32 v5, v5, v4
v_sub_nc_u32_e32 v11, v5, v4
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v5, v7, v8
v_ashrrev_i32_e32 v4, 31, v11
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v5, v7, v5
v_add_nc_u32_e32 v8, v11, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_xor_b32_e32 v8, v8, v4
v_xor_b32_e32 v4, s20, v4
v_mul_hi_u32 v5, v8, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v9, v5, s23
v_sub_nc_u32_e32 v8, v8, v9
v_add_nc_u32_e32 v9, 1, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v12, s23, v8
v_cmp_le_u32_e32 vcc_lo, s23, v8
v_dual_cndmask_b32 v5, v5, v9 :: v_dual_cndmask_b32 v8, v8, v12
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v9, 1, v5
v_cmp_le_u32_e32 vcc_lo, s23, v8
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v5, v5, v9, vcc_lo
s_and_not1_b32 vcc_lo, exec_lo, s15
v_xor_b32_e32 v5, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_sub_nc_u32_e32 v9, v5, v4
v_mad_u64_u32 v[4:5], null, s11, v10, v[1:2]
v_mul_lo_u32 v5, v11, s9
v_mul_lo_u32 v12, v9, s8
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
v_mul_lo_u32 v8, v4, s2
v_mov_b32_e32 v4, 0
v_sub_nc_u32_e32 v5, v10, v5
s_delay_alu instid0(VALU_DEP_4)
v_sub_nc_u32_e32 v10, v11, v12
s_cbranch_vccnz .LBB0_5
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mul_lo_u32 v4, v10, s6
v_mul_lo_u32 v11, v5, s7
v_add3_u32 v4, v11, v8, v4
.LBB0_5:
s_waitcnt lgkmcnt(0)
v_mul_lo_u32 v9, v9, s6
s_and_not1_b32 vcc_lo, exec_lo, s16
s_cbranch_vccnz .LBB0_7
v_mul_lo_u32 v4, v5, s7
s_delay_alu instid0(VALU_DEP_1)
v_add3_u32 v4, v8, v9, v4
.LBB0_7:
v_mul_lo_u32 v10, v10, s7
s_and_not1_b32 vcc_lo, exec_lo, s17
s_cbranch_vccnz .LBB0_9
s_delay_alu instid0(VALU_DEP_1)
v_add3_u32 v4, v8, v9, v10
.LBB0_9:
s_and_not1_b32 vcc_lo, exec_lo, s3
s_cbranch_vccnz .LBB0_2
v_mul_lo_u32 v4, v5, s2
s_delay_alu instid0(VALU_DEP_1)
v_add3_u32 v4, v4, v9, v10
s_branch .LBB0_2
.LBB0_11:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8sum_gradPfiiiiiS_iiii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 312
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 13
.amdhsa_next_free_sgpr 25
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z8sum_gradPfiiiiiS_iiii, .Lfunc_end0-_Z8sum_gradPfiiiiiS_iiii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .offset: 40
.size: 4
.value_kind: by_value
- .offset: 44
.size: 4
.value_kind: by_value
- .offset: 48
.size: 4
.value_kind: by_value
- .offset: 52
.size: 4
.value_kind: by_value
- .offset: 56
.size: 4
.value_kind: hidden_block_count_x
- .offset: 60
.size: 4
.value_kind: hidden_block_count_y
- .offset: 64
.size: 4
.value_kind: hidden_block_count_z
- .offset: 68
.size: 2
.value_kind: hidden_group_size_x
- .offset: 70
.size: 2
.value_kind: hidden_group_size_y
- .offset: 72
.size: 2
.value_kind: hidden_group_size_z
- .offset: 74
.size: 2
.value_kind: hidden_remainder_x
- .offset: 76
.size: 2
.value_kind: hidden_remainder_y
- .offset: 78
.size: 2
.value_kind: hidden_remainder_z
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 112
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 120
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 312
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8sum_gradPfiiiiiS_iiii
.private_segment_fixed_size: 0
.sgpr_count: 27
.sgpr_spill_count: 0
.symbol: _Z8sum_gradPfiiiiiS_iiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 13
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void sum_grad(float* in, int inSize0, int inSize1, int inSize2, int inSize3, int nElement, float* out, int outStride0, int outStride1, int outStride2, int dim) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (int i = tid; i < nElement; i += stride) {
int inOff2 = i / inSize3;
int inDim3 = i - inOff2 * inSize3;
int inOff1 = inOff2 / inSize2;
int inDim2 = inOff2 - inOff1 * inSize2;
int inDim0 = inOff1 / inSize1;
int inDim1 = inOff1 - inDim0 * inSize1;
int outOff = 0;
if (dim == 0) outOff = inDim1 * outStride0 + inDim2 * outStride1 + inDim3 * outStride2;
if (dim == 1) outOff = inDim0 * outStride0 + inDim2 * outStride1 + inDim3 * outStride2;
if (dim == 2) outOff = inDim0 * outStride0 + inDim1 * outStride1 + inDim3 * outStride2;
if (dim == 3) outOff = inDim0 * outStride0 + inDim1 * outStride1 + inDim2 * outStride2;
in[i] += out[outOff];
}
} | .text
.file "sum_grad.hip"
.globl _Z23__device_stub__sum_gradPfiiiiiS_iiii # -- Begin function _Z23__device_stub__sum_gradPfiiiiiS_iiii
.p2align 4, 0x90
.type _Z23__device_stub__sum_gradPfiiiiiS_iiii,@function
_Z23__device_stub__sum_gradPfiiiiiS_iiii: # @_Z23__device_stub__sum_gradPfiiiiiS_iiii
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 72(%rsp)
movl %esi, 20(%rsp)
movl %edx, 16(%rsp)
movl %ecx, 12(%rsp)
movl %r8d, 8(%rsp)
movl %r9d, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 20(%rsp), %rax
movq %rax, 88(%rsp)
leaq 16(%rsp), %rax
movq %rax, 96(%rsp)
leaq 12(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
leaq 176(%rsp), %rax
movq %rax, 128(%rsp)
leaq 184(%rsp), %rax
movq %rax, 136(%rsp)
leaq 192(%rsp), %rax
movq %rax, 144(%rsp)
leaq 200(%rsp), %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8sum_gradPfiiiiiS_iiii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end0:
.size _Z23__device_stub__sum_gradPfiiiiiS_iiii, .Lfunc_end0-_Z23__device_stub__sum_gradPfiiiiiS_iiii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8sum_gradPfiiiiiS_iiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z8sum_gradPfiiiiiS_iiii,@object # @_Z8sum_gradPfiiiiiS_iiii
.section .rodata,"a",@progbits
.globl _Z8sum_gradPfiiiiiS_iiii
.p2align 3, 0x0
_Z8sum_gradPfiiiiiS_iiii:
.quad _Z23__device_stub__sum_gradPfiiiiiS_iiii
.size _Z8sum_gradPfiiiiiS_iiii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z8sum_gradPfiiiiiS_iiii"
.size .L__unnamed_1, 25
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__sum_gradPfiiiiiS_iiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8sum_gradPfiiiiiS_iiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0015b477_00000000-6_sum_grad.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z38__device_stub__Z8sum_gradPfiiiiiS_iiiiPfiiiiiS_iiii
.type _Z38__device_stub__Z8sum_gradPfiiiiiS_iiiiPfiiiiiS_iiii, @function
_Z38__device_stub__Z8sum_gradPfiiiiiS_iiiiPfiiiiiS_iiii:
.LFB2051:
.cfi_startproc
endbr64
subq $216, %rsp
.cfi_def_cfa_offset 224
movq %rdi, 40(%rsp)
movl %esi, 36(%rsp)
movl %edx, 32(%rsp)
movl %ecx, 28(%rsp)
movl %r8d, 24(%rsp)
movl %r9d, 20(%rsp)
movq 224(%rsp), %rax
movq %rax, 8(%rsp)
movq %fs:40, %rax
movq %rax, 200(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 36(%rsp), %rax
movq %rax, 120(%rsp)
leaq 32(%rsp), %rax
movq %rax, 128(%rsp)
leaq 28(%rsp), %rax
movq %rax, 136(%rsp)
leaq 24(%rsp), %rax
movq %rax, 144(%rsp)
leaq 20(%rsp), %rax
movq %rax, 152(%rsp)
leaq 8(%rsp), %rax
movq %rax, 160(%rsp)
leaq 232(%rsp), %rax
movq %rax, 168(%rsp)
leaq 240(%rsp), %rax
movq %rax, 176(%rsp)
leaq 248(%rsp), %rax
movq %rax, 184(%rsp)
leaq 256(%rsp), %rax
movq %rax, 192(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 200(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $216, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 232
pushq 56(%rsp)
.cfi_def_cfa_offset 240
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z8sum_gradPfiiiiiS_iiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 224
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z38__device_stub__Z8sum_gradPfiiiiiS_iiiiPfiiiiiS_iiii, .-_Z38__device_stub__Z8sum_gradPfiiiiiS_iiiiPfiiiiiS_iiii
.globl _Z8sum_gradPfiiiiiS_iiii
.type _Z8sum_gradPfiiiiiS_iiii, @function
_Z8sum_gradPfiiiiiS_iiii:
.LFB2052:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 56(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
movl 56(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
movl 56(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 48
movl 56(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 56
pushq 56(%rsp)
.cfi_def_cfa_offset 64
call _Z38__device_stub__Z8sum_gradPfiiiiiS_iiiiPfiiiiiS_iiii
addq $56, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z8sum_gradPfiiiiiS_iiii, .-_Z8sum_gradPfiiiiiS_iiii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z8sum_gradPfiiiiiS_iiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z8sum_gradPfiiiiiS_iiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "sum_grad.hip"
.globl _Z23__device_stub__sum_gradPfiiiiiS_iiii # -- Begin function _Z23__device_stub__sum_gradPfiiiiiS_iiii
.p2align 4, 0x90
.type _Z23__device_stub__sum_gradPfiiiiiS_iiii,@function
_Z23__device_stub__sum_gradPfiiiiiS_iiii: # @_Z23__device_stub__sum_gradPfiiiiiS_iiii
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 72(%rsp)
movl %esi, 20(%rsp)
movl %edx, 16(%rsp)
movl %ecx, 12(%rsp)
movl %r8d, 8(%rsp)
movl %r9d, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 20(%rsp), %rax
movq %rax, 88(%rsp)
leaq 16(%rsp), %rax
movq %rax, 96(%rsp)
leaq 12(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
leaq 176(%rsp), %rax
movq %rax, 128(%rsp)
leaq 184(%rsp), %rax
movq %rax, 136(%rsp)
leaq 192(%rsp), %rax
movq %rax, 144(%rsp)
leaq 200(%rsp), %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8sum_gradPfiiiiiS_iiii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end0:
.size _Z23__device_stub__sum_gradPfiiiiiS_iiii, .Lfunc_end0-_Z23__device_stub__sum_gradPfiiiiiS_iiii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8sum_gradPfiiiiiS_iiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z8sum_gradPfiiiiiS_iiii,@object # @_Z8sum_gradPfiiiiiS_iiii
.section .rodata,"a",@progbits
.globl _Z8sum_gradPfiiiiiS_iiii
.p2align 3, 0x0
_Z8sum_gradPfiiiiiS_iiii:
.quad _Z23__device_stub__sum_gradPfiiiiiS_iiii
.size _Z8sum_gradPfiiiiiS_iiii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z8sum_gradPfiiiiiS_iiii"
.size .L__unnamed_1, 25
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__sum_gradPfiiiiiS_iiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8sum_gradPfiiiiiS_iiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cuda_runtime.h>
#include <iostream>
#include <vector>
#include <utility>
#include <stdio.h>
#include <math.h>
using namespace std;
#define K 3
#define BLCH 8
#define BLCW 32
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a CUDA runtime error (used via the gpuErrchk macro above, which
// supplies __FILE__/__LINE__). Prints the human-readable error string to
// stderr; when `abort` is true (the default) the process exits with the
// error code as its exit status.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// declaration of constant memory where the fiter values are stored
__constant__ float cm[K*K];
// Per-block convolution worker: slides a k x k filter (in constant memory
// `cm`) down `nRows - k + 1` output rows of this block's tile. The shared
// tile `sm` is assumed to already hold the first k rows (loaded by the
// caller); each iteration convolves one output row from `sm`, then shifts
// the window down by publishing one prefetched row from global memory into
// `sm`, double-buffering the row through registers so the load overlaps the
// compute. Parameters gID/tID/rel_row/rel_col position this thread in the
// global image / output matrix; stopPrefetchRowID and lastActiveThreadID let
// border blocks suppress out-of-range prefetches.
// NOTE(review): this function contains __syncthreads() but is invoked under
// thread-divergent guards in conv_kernel (e.g. only tID < nT-(k-1) call it
// in right-border blocks). All threads of a block must reach a barrier;
// this is undefined behavior per the CUDA programming guide — verify on the
// target architecture or restructure so idle threads still hit the barriers.
__device__ void conv(const float* gm,
float* convolved,
int bh,
int bw,
int ih,
int iw,
int ch,
int cw,
int smH,
int smW,
int k,
float* sm,
int gID,
int tID,
int nT,
int rel_row,
int rel_col,
int nRows,
int stopPrefetchRowID,
int lastActiveThreadID) {
for(int i=k; i<=nRows; i++)
{
/*
----prefetch a pixel value from GM and store it in register----
all threads fetch the cell value immediately below to the current cell iteratively
last thread in the block would fetch k cells immediately below the current cell
boundary check would be needed for the blocks that act on the bottom most partition of the input image to prevent it from prefetching out of image values.
*/
float reg;
float regArr[K];
if(i <= stopPrefetchRowID){
reg = gm[i * iw + gID];
if(tID == lastActiveThreadID){
// The rightmost active thread also fetches the k-1 halo pixels of the row.
for(int j=1; j<=k-1; j++){
regArr[j] = gm[(i * iw) + gID + j];
}
}
}
// load k * k pixels above the current cell
float imgPixels[K*K];
for(int r=i-k; r<i; r++){
for(int c=0; c<k; c++){
/* translate the indices to [0,k] using r - (i-k) as imgPixels is of size k*k */
imgPixels[(r-i+k)*k + c] = sm[r * smW + tID + c];
}
}
/*multiply image pixel values with filter values (direct convolution) */
// NOTE(review): 0.0 is a double literal; 0.0f would avoid a double
// promotion here (the accumulation itself stays float, so no behavior
// change is expected — left as-is in this documentation pass).
float convolvedCell = 0.0;
for(int c=0; c<k*k; c++){
convolvedCell += cm[c]*imgPixels[c];
}
//place the convolvedCell value into convolvedMatrix
int cID = ( ( (rel_row * bh) + (i-k) ) * cw )+( rel_col * nT )+tID;
// Debug-only guard: an out-of-range cID indicates a mapping bug upstream.
if(cID < 0 || cID >= ch*cw ) {
printf("cID : %d, tID : %d, gID : %d\n", cID, tID, gID );
}
convolved[cID] = convolvedCell;
// Barrier 1: all reads of the old window must finish before row i is overwritten.
__syncthreads();
if(i <= stopPrefetchRowID){
sm[i * smW + tID] = reg;
if(tID == lastActiveThreadID){
for(int j=1; j<=k-1; j++){
int sID = i *smW + tID + j;
sm[sID] = regArr[j];
}
}
}
// Barrier 2: the new row must be visible to the whole block before the next iteration reads it.
__syncthreads();
}
}
// Tiled direct convolution driver. Each thread block owns a bh x bw tile of
// the iw-wide image (one thread per tile column; nT == blcW). The block
// first stages k image rows into shared memory, then delegates the sliding
// k x k convolution to the __device__ helper `conv`, with per-border
// parameters (nRows / stopPrefetchRowID / lastActiveThreadID) that prevent
// bottom-border blocks from prefetching past the image and right-border
// blocks from reading past the row end.
// NOTE(review): assumes iw is an exact multiple of nT (nBx = iw / nT uses
// integer division) — confirm launch configuration upholds this.
// NOTE(review): in the right-border and bottom-right branches only
// tID < nT-(k-1) threads call conv(), which contains __syncthreads();
// a block barrier inside a thread-divergent branch is undefined behavior.
__global__ void conv_kernel(const float* gm,
float* convolved,
int bh,
int bw,
int ih,
int iw,
int ch,
int cw,
int smH,
int smW,
int k) {
int tID = threadIdx.x;
int bID = blockIdx.x;
int nT = blockDim.x;
int nB = gridDim.x;
int nBx = iw / nT;
//printf("num of blocks is %d\n", nB);
//printf("nB in a row is %d\n", nBx);
//check for right border or bottom border thread block
bool isBottomBorder = false;
bool isRightBorder = false;
// bottom border thread block
if(bID >= nB - nBx) {
//printf("bID : %d is bottom border\n", bID);
isBottomBorder = true;
}
// right border thread block
if((bID+1) % nBx == 0){
//printf("bID : %d is right border\n", bID);
isRightBorder = true;
}
// ---------------- Load k rows from GM into SM ----------------------
__shared__ float sm[ (BLCH + K - 1) * (BLCW + K - 1) ];
// rel_row and rel_col maps the Thread Block to appropriate position
int rel_row = bID / nBx;
int rel_col = bID % nBx;
// (rel_row * bh * iw) covers all the cells before row_ids bh, 2bh, 3bh ..
// gID finally maps threads to cells at rows 0, bh, 2bh, 3bh, ...
int gID = (rel_row * bh * iw) + (rel_col * nT) + tID;
for(int i=0; i<k; i++){
int sID = i * smW + tID;
sm[sID] = gm[i * iw + gID];
/* if last thread in the block, it should fetch additional k-1 pixels
in each row which are needed for computation of the convolution
*/
if(!isRightBorder && tID == nT-1){
for(int j=1; j<=k-1; j++){
sID = (i * smW) + tID + j;
sm[sID] = gm[i * iw + gID + j];
}
}
}
// Ensure the staged k rows are visible to every thread before convolving.
__syncthreads();
// Interior block: full bh output rows, halo prefetch enabled, all threads active.
if( !isBottomBorder && !isRightBorder ){
int lastActiveThreadID = nT - 1;
int nRows = bh + k - 1;
int stopPrefetchRowID = nRows;
conv( gm, convolved, bh, bw,
ih, iw, ch, cw, smH, smW, k,
sm, gID, tID, nT, rel_row, rel_col,
nRows, stopPrefetchRowID, lastActiveThreadID );
}
// Bottom-right corner: clamp both the row prefetch and the active thread range.
else if( isBottomBorder && isRightBorder ){
/* make the last k-1 threads in the block to be idle. as there is no convolution needed for them */
if(tID < (nT - (k-1))){
int nRows = bh;
int stopPrefetchRowID = nRows - 1;
int lastActiveThreadID = nT - k;
conv( gm, convolved, bh, bw,
ih, iw, ch, cw, smH, smW, k,
sm, gID, tID, nT, rel_row, rel_col,
nRows, stopPrefetchRowID, lastActiveThreadID );
}
}
// Bottom border only: clamp the row prefetch so it stays inside the image.
else if( isBottomBorder ){
int nRows = bh;
int stopPrefetchRowID = nRows-1;
int lastActiveThreadID = nT - 1;
conv( gm, convolved, bh, bw,
ih, iw, ch, cw, smH, smW, k,
sm, gID, tID, nT, rel_row, rel_col,
nRows, stopPrefetchRowID, lastActiveThreadID );
}
// Right border only: last k-1 columns produce no output, so idle those threads.
else if( isRightBorder ){
/* make the last k-1 threads in the block to be idle. as there is no convolution needed for them */
if(tID < (nT - (k-1))){
int nRows = bh + k - 1;
int stopPrefetchRowID = nRows;
int lastActiveThreadID = nT - k;
conv( gm, convolved, bh, bw,
ih, iw, ch, cw, smH, smW, k,
sm, gID, tID, nT, rel_row, rel_col,
nRows, stopPrefetchRowID, lastActiveThreadID );
}
}
}
// Host driver: builds a 2048x2048 all-ones image, convolves it with a k x k
// filter of 0.5s (so every "valid" output cell must equal k*k*0.5 = 4.5),
// and prints the coordinates of any mismatching cells.
// Fixes over the previous revision:
//  - `delete` on new[]-allocated arrays is undefined behavior -> `delete[]`
//  - `filter` was leaked; device buffers were never cudaFree'd
//  - kernel launch errors were silently ignored -> cudaGetLastError()
int main(int argc, char **argv){
/* set values for image dimensions, block dimensions, filter size, stride ..
some of the constraints to keep in mind are
1. the value of k(filter size) should be less than blcH and blcW
2. stride value(s) should be 1
*/
int imgH = 2048;
int imgW = 2048;
int blcH = BLCH;
int blcW = BLCW;
int k = K;
int s = 1;
// one thread block per blcH x blcW tile, one thread per tile column
int nB = (imgH * imgW) / (blcH * blcW);
int nT = blcW;
int imgDims = imgH * imgW;
size_t imgSize = imgDims * sizeof(float);
// host image, all ones, so the expected convolved value is k*k*0.5
float *h_img = new float[imgDims];
for(int i=0; i<imgDims; i++){
h_img[i] = 1.0f;
}
// device copy of the image
float *d_img;
gpuErrchk(cudaMalloc((void **) &d_img, imgSize ));
gpuErrchk(cudaMemcpy(d_img, h_img, imgSize, cudaMemcpyHostToDevice));
// create filter and copy to constant memory
int filterDims = k * k;
size_t filterSize = filterDims * sizeof(float);
float *filter = new float[filterDims];
for(int i=0; i<filterDims; i++){
filter[i] = 0.5f;
}
gpuErrchk(cudaMemcpyToSymbol(cm, filter, filterSize));
// host and device arrays holding the "valid" convolution output
int convH = ( (imgH - k) / s ) + 1;
int convW = convH;
int convDims = convH * convW;
size_t convSize = convDims * sizeof(float);
float *h_convolved = new float[convDims];
for(int i=0; i<convDims; i++){
h_convolved[i] = 0;
}
float *d_convolved;
gpuErrchk(cudaMalloc((void **) &d_convolved, convSize));
gpuErrchk(cudaMemcpy(d_convolved, h_convolved,
convSize, cudaMemcpyHostToDevice));
// shared-memory tile dimensions (block size plus filter halo)
int smH = blcH + k - 1;
int smW = blcW + k - 1;
// launch the kernel and surface launch-configuration errors explicitly
conv_kernel<<<nB, nT>>>(d_img, d_convolved,
blcH, blcW,
imgH, imgW,
convH, convW,
smH, smW,
k);
gpuErrchk(cudaGetLastError());
// blocking copy-back also synchronizes with the kernel
gpuErrchk(cudaMemcpy(h_convolved, d_convolved,
convSize, cudaMemcpyDeviceToHost));
// verify: every output cell of the all-ones image must be exactly 4.5
vector<pair<int,int> > miss;
for(int i=0; i<convH; i++){
for(int j=0; j<convW; j++){
if(h_convolved[i*convW +j] != 4.5){
miss.push_back(make_pair(i,j));
}
}
}
cout<<miss.size()<<"\n";
for(size_t i=0;i<miss.size();i++){
cout<<miss[i].first<<","<<miss[i].second<<"\n";
}
// release device buffers before resetting the device
gpuErrchk(cudaFree(d_img));
gpuErrchk(cudaFree(d_convolved));
cudaDeviceReset();
// array form of delete for new[] allocations; `filter` previously leaked
delete[] h_img;
delete[] h_convolved;
delete[] filter;
return 0;
}
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB4057:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4057:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata._Z9gpuAssert9cudaErrorPKcib.str1.1,"aMS",@progbits,1
.LC0:
.string "GPUassert: %s %s %d\n"
.section .text._Z9gpuAssert9cudaErrorPKcib,"axG",@progbits,_Z9gpuAssert9cudaErrorPKcib,comdat
.weak _Z9gpuAssert9cudaErrorPKcib
.type _Z9gpuAssert9cudaErrorPKcib, @function
_Z9gpuAssert9cudaErrorPKcib:
.LFB4032:
.cfi_startproc
endbr64
testl %edi, %edi
jne .L9
ret
.L9:
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $8, %rsp
.cfi_def_cfa_offset 48
movl %edi, %ebx
movq %rsi, %r13
movl %edx, %r12d
movl %ecx, %ebp
call cudaGetErrorString@PLT
movq %rax, %rcx
movl %r12d, %r9d
movq %r13, %r8
leaq .LC0(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
testb %bpl, %bpl
jne .L10
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L10:
.cfi_restore_state
movl %ebx, %edi
call exit@PLT
.cfi_endproc
.LFE4032:
.size _Z9gpuAssert9cudaErrorPKcib, .-_Z9gpuAssert9cudaErrorPKcib
.text
.globl _Z4convPKfPfiiiiiiiiiS1_iiiiiiii
.type _Z4convPKfPfiiiiiiiiiS1_iiiiiiii, @function
_Z4convPKfPfiiiiiiiiiS1_iiiiiiii:
.LFB4033:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE4033:
.size _Z4convPKfPfiiiiiiiiiS1_iiiiiiii, .-_Z4convPKfPfiiiiiiiiiS1_iiiiiiii
.globl _Z43__device_stub__Z11conv_kernelPKfPfiiiiiiiiiPKfPfiiiiiiiii
.type _Z43__device_stub__Z11conv_kernelPKfPfiiiiiiiiiPKfPfiiiiiiiii, @function
_Z43__device_stub__Z11conv_kernelPKfPfiiiiiiiiiPKfPfiiiiiiiii:
.LFB4079:
.cfi_startproc
endbr64
subq $200, %rsp
.cfi_def_cfa_offset 208
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movl %r9d, (%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movq %rsp, %rax
movq %rax, 136(%rsp)
leaq 208(%rsp), %rax
movq %rax, 144(%rsp)
leaq 216(%rsp), %rax
movq %rax, 152(%rsp)
leaq 224(%rsp), %rax
movq %rax, 160(%rsp)
leaq 232(%rsp), %rax
movq %rax, 168(%rsp)
leaq 240(%rsp), %rax
movq %rax, 176(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L17
.L13:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L18
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 216
pushq 40(%rsp)
.cfi_def_cfa_offset 224
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z11conv_kernelPKfPfiiiiiiiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L13
.L18:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4079:
.size _Z43__device_stub__Z11conv_kernelPKfPfiiiiiiiiiPKfPfiiiiiiiii, .-_Z43__device_stub__Z11conv_kernelPKfPfiiiiiiiiiPKfPfiiiiiiiii
.globl _Z11conv_kernelPKfPfiiiiiiiii
.type _Z11conv_kernelPKfPfiiiiiiiii, @function
_Z11conv_kernelPKfPfiiiiiiiii:
.LFB4080:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 56(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
movl 56(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
movl 56(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 48
movl 56(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 56
movl 56(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 64
call _Z43__device_stub__Z11conv_kernelPKfPfiiiiiiiiiPKfPfiiiiiiiii
addq $56, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4080:
.size _Z11conv_kernelPKfPfiiiiiiiii, .-_Z11conv_kernelPKfPfiiiiiiiii
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "_Z11conv_kernelPKfPfiiiiiiiii"
.LC2:
.string "cm"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB4082:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z11conv_kernelPKfPfiiiiiiiii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $36, %r9d
movl $0, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _ZL2cm(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4082:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .text._ZNSt6vectorISt4pairIiiESaIS1_EED2Ev,"axG",@progbits,_ZNSt6vectorISt4pairIiiESaIS1_EED5Ev,comdat
.align 2
.weak _ZNSt6vectorISt4pairIiiESaIS1_EED2Ev
.type _ZNSt6vectorISt4pairIiiESaIS1_EED2Ev, @function
_ZNSt6vectorISt4pairIiiESaIS1_EED2Ev:
.LFB4393:
.cfi_startproc
endbr64
movq (%rdi), %rax
testq %rax, %rax
je .L26
subq $8, %rsp
.cfi_def_cfa_offset 16
movq 16(%rdi), %rsi
subq %rax, %rsi
movq %rax, %rdi
call _ZdlPvm@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.L26:
ret
.cfi_endproc
.LFE4393:
.size _ZNSt6vectorISt4pairIiiESaIS1_EED2Ev, .-_ZNSt6vectorISt4pairIiiESaIS1_EED2Ev
.weak _ZNSt6vectorISt4pairIiiESaIS1_EED1Ev
.set _ZNSt6vectorISt4pairIiiESaIS1_EED1Ev,_ZNSt6vectorISt4pairIiiESaIS1_EED2Ev
.section .rodata._ZNSt6vectorISt4pairIiiESaIS1_EE17_M_realloc_insertIJS1_EEEvN9__gnu_cxx17__normal_iteratorIPS1_S3_EEDpOT_.str1.1,"aMS",@progbits,1
.LC3:
.string "vector::_M_realloc_insert"
.section .text._ZNSt6vectorISt4pairIiiESaIS1_EE17_M_realloc_insertIJS1_EEEvN9__gnu_cxx17__normal_iteratorIPS1_S3_EEDpOT_,"axG",@progbits,_ZNSt6vectorISt4pairIiiESaIS1_EE17_M_realloc_insertIJS1_EEEvN9__gnu_cxx17__normal_iteratorIPS1_S3_EEDpOT_,comdat
.align 2
.weak _ZNSt6vectorISt4pairIiiESaIS1_EE17_M_realloc_insertIJS1_EEEvN9__gnu_cxx17__normal_iteratorIPS1_S3_EEDpOT_
.type _ZNSt6vectorISt4pairIiiESaIS1_EE17_M_realloc_insertIJS1_EEEvN9__gnu_cxx17__normal_iteratorIPS1_S3_EEDpOT_, @function
_ZNSt6vectorISt4pairIiiESaIS1_EE17_M_realloc_insertIJS1_EEEvN9__gnu_cxx17__normal_iteratorIPS1_S3_EEDpOT_:
.LFB4652:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rdx, 8(%rsp)
movq 8(%rdi), %rbp
movq (%rdi), %r12
movq %rbp, %rax
subq %r12, %rax
sarq $3, %rax
movabsq $1152921504606846975, %rdx
cmpq %rdx, %rax
je .L47
movq %rdi, %r14
movq %rsi, %r13
movq %rsi, %rbx
cmpq %r12, %rbp
movl $1, %edx
cmovne %rax, %rdx
addq %rdx, %rax
jc .L32
movabsq $1152921504606846975, %rdx
cmpq %rdx, %rax
cmovbe %rax, %rdx
movq %rdx, 16(%rsp)
movq %rsi, %rdi
subq %r12, %rdi
movq %rdi, 24(%rsp)
movl $0, %r15d
testq %rax, %rax
je .L33
jmp .L39
.L47:
leaq .LC3(%rip), %rdi
call _ZSt20__throw_length_errorPKc@PLT
.L42:
movq %r15, %rsi
jmp .L34
.L32:
movq %rsi, %rax
subq %r12, %rax
movq %rax, 24(%rsp)
movabsq $1152921504606846975, %rax
movq %rax, 16(%rsp)
.L39:
movq 16(%rsp), %rax
leaq 0(,%rax,8), %rdi
call _Znwm@PLT
movq %rax, %r15
.L33:
movq 8(%rsp), %rax
movq (%rax), %rax
movq 24(%rsp), %rdi
movq %rax, (%r15,%rdi)
cmpq %r12, %r13
je .L42
movq %r13, %rsi
subq %r12, %rsi
addq %r15, %rsi
movq %r15, %rax
movq %r12, %rdx
.L35:
movq (%rdx), %rcx
movq %rcx, (%rax)
addq $8, %rdx
addq $8, %rax
cmpq %rax, %rsi
jne .L35
.L34:
leaq 8(%rsi), %rax
movq %rax, 8(%rsp)
cmpq %rbp, %r13
je .L36
movq %rbp, %rcx
subq %r13, %rcx
.L37:
movq (%rbx), %rdx
movq %rdx, (%rax)
addq $8, %rbx
addq $8, %rax
cmpq %rbp, %rbx
jne .L37
addq %rcx, 8(%rsp)
.L36:
testq %r12, %r12
je .L38
movq 16(%r14), %rsi
subq %r12, %rsi
movq %r12, %rdi
call _ZdlPvm@PLT
.L38:
movq %r15, (%r14)
movq 8(%rsp), %rax
movq %rax, 8(%r14)
movq 16(%rsp), %rax
leaq (%r15,%rax,8), %rax
movq %rax, 16(%r14)
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4652:
.size _ZNSt6vectorISt4pairIiiESaIS1_EE17_M_realloc_insertIJS1_EEEvN9__gnu_cxx17__normal_iteratorIPS1_S3_EEDpOT_, .-_ZNSt6vectorISt4pairIiiESaIS1_EE17_M_realloc_insertIJS1_EEEvN9__gnu_cxx17__normal_iteratorIPS1_S3_EEDpOT_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC5:
.string "/home/ubuntu/Datasets/stackv2/train-structured/yottabytt/convolution_kernel/master/convolution.cu"
.section .rodata.str1.1
.LC9:
.string "\n"
.LC10:
.string ","
.text
.globl main
.type main, @function
main:
.LFB4034:
.cfi_startproc
.cfi_personality 0x9b,DW.ref.__gxx_personality_v0
.cfi_lsda 0x1b,.LLSDA4034
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $88, %rsp
.cfi_def_cfa_offset 144
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $16777216, %edi
.LEHB0:
call _Znam@PLT
movq %rax, %rbp
leaq 16777216(%rax), %rdx
movss .LC4(%rip), %xmm0
.L49:
movss %xmm0, (%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L49
leaq 16(%rsp), %rdi
movl $16777216, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $1, %ecx
movl $233, %edx
leaq .LC5(%rip), %rbx
movq %rbx, %rsi
call _Z9gpuAssert9cudaErrorPKcib
movl $1, %ecx
movl $16777216, %edx
movq %rbp, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $1, %ecx
movl $234, %edx
movq %rbx, %rsi
call _Z9gpuAssert9cudaErrorPKcib
movl $36, %edi
call _Znam@PLT
movq %rax, %rsi
leaq 36(%rax), %rdx
movss .LC6(%rip), %xmm0
.L50:
movss %xmm0, (%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L50
movl $1, %r8d
movl $0, %ecx
movl $36, %edx
leaq _ZL2cm(%rip), %rdi
call cudaMemcpyToSymbol@PLT
movl %eax, %edi
movl $1, %ecx
movl $242, %edx
leaq .LC5(%rip), %rsi
call _Z9gpuAssert9cudaErrorPKcib
movl $16744464, %edi
call _Znam@PLT
movq %rax, %rbx
movq %rax, %r13
leaq 16744464(%rax), %rdx
.L51:
movl $0x00000000, (%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L51
leaq 24(%rsp), %rdi
movl $16744464, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $1, %ecx
movl $253, %edx
leaq .LC5(%rip), %r12
movq %r12, %rsi
call _Z9gpuAssert9cudaErrorPKcib
movl $1, %ecx
movl $16744464, %edx
movq %rbx, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $1, %ecx
movl $254, %edx
movq %r12, %rsi
call _Z9gpuAssert9cudaErrorPKcib
movl $32, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $16384, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 48(%rsp), %rdx
movl $1, %ecx
movq 36(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L74
.L52:
movl $2, %ecx
movl $16744464, %edx
movq 24(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $1, %ecx
movl $266, %edx
leaq .LC5(%rip), %rsi
call _Z9gpuAssert9cudaErrorPKcib
movq $0, 48(%rsp)
movq $0, 56(%rsp)
movq $0, 64(%rsp)
movl $0, %r14d
leaq 36(%rsp), %r15
jmp .L53
.L74:
subq $8, %rsp
.cfi_def_cfa_offset 152
pushq $3
.cfi_def_cfa_offset 160
pushq $34
.cfi_def_cfa_offset 168
pushq $10
.cfi_def_cfa_offset 176
pushq $2046
.cfi_def_cfa_offset 184
pushq $2046
.cfi_def_cfa_offset 192
movl $2048, %r9d
movl $2048, %r8d
movl $32, %ecx
movl $8, %edx
movq 72(%rsp), %rsi
movq 64(%rsp), %rdi
call _Z43__device_stub__Z11conv_kernelPKfPfiiiiiiiiiPKfPfiiiiiiiii
.LEHE0:
addq $48, %rsp
.cfi_def_cfa_offset 144
jmp .L52
.L56:
leaq 48(%rsp), %rdi
movq %r15, %rdx
.LEHB1:
call _ZNSt6vectorISt4pairIiiESaIS1_EE17_M_realloc_insertIJS1_EEEvN9__gnu_cxx17__normal_iteratorIPS1_S3_EEDpOT_
.L54:
addq $1, %r12
cmpq $2046, %r12
je .L75
.L58:
movss 0(%r13,%r12,4), %xmm0
ucomiss .LC8(%rip), %xmm0
jp .L66
je .L54
.L66:
movl %r14d, 36(%rsp)
movl %r12d, 40(%rsp)
movq 56(%rsp), %rsi
cmpq 64(%rsp), %rsi
je .L56
movq 36(%rsp), %rax
movq %rax, (%rsi)
addq $8, %rsi
movq %rsi, 56(%rsp)
jmp .L54
.L75:
addl $1, %r14d
addq $8184, %r13
cmpl $2046, %r14d
je .L59
.L53:
movl $0, %r12d
jmp .L58
.L59:
movq 56(%rsp), %r12
movq 48(%rsp), %r14
movq %r12, %r13
subq %r14, %r13
sarq $3, %r13
movq %r13, %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
leaq .LC9(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
cmpq %r14, %r12
je .L60
movl $0, %r12d
leaq _ZSt4cout(%rip), %r15
jmp .L61
.L76:
movl $1, %edx
leaq .LC10(%rip), %rsi
movq %rax, 8(%rsp)
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl 4(%r14,%r12,8), %esi
movq 8(%rsp), %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
movl $1, %edx
leaq .LC9(%rip), %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $1, %r12
cmpq %r13, %r12
jnb .L60
.L61:
movl (%r14,%r12,8), %esi
movq %r15, %rdi
call _ZNSolsEi@PLT
jmp .L76
.L60:
call cudaDeviceReset@PLT
.LEHE1:
movl $4, %esi
movq %rbp, %rdi
call _ZdlPvm@PLT
movl $4, %esi
movq %rbx, %rdi
call _ZdlPvm@PLT
leaq 48(%rsp), %rdi
call _ZNSt6vectorISt4pairIiiESaIS1_EED1Ev
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L77
movl $0, %eax
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L65:
.cfi_restore_state
endbr64
movq %rax, %rbx
leaq 48(%rsp), %rdi
call _ZNSt6vectorISt4pairIiiESaIS1_EED1Ev
movq 72(%rsp), %rax
subq %fs:40, %rax
je .L63
call __stack_chk_fail@PLT
.L63:
movq %rbx, %rdi
.LEHB2:
call _Unwind_Resume@PLT
.LEHE2:
.L77:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4034:
.globl __gxx_personality_v0
.section .gcc_except_table,"a",@progbits
.LLSDA4034:
.byte 0xff
.byte 0xff
.byte 0x1
.uleb128 .LLSDACSE4034-.LLSDACSB4034
.LLSDACSB4034:
.uleb128 .LEHB0-.LFB4034
.uleb128 .LEHE0-.LEHB0
.uleb128 0
.uleb128 0
.uleb128 .LEHB1-.LFB4034
.uleb128 .LEHE1-.LEHB1
.uleb128 .L65-.LFB4034
.uleb128 0
.uleb128 .LEHB2-.LFB4034
.uleb128 .LEHE2-.LEHB2
.uleb128 0
.uleb128 0
.LLSDACSE4034:
.text
.size main, .-main
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL2cm
.comm _ZL2cm,36,32
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC4:
.long 1065353216
.align 4
.LC6:
.long 1056964608
.align 4
.LC8:
.long 1083179008
.hidden DW.ref.__gxx_personality_v0
.weak DW.ref.__gxx_personality_v0
.section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat
.align 8
.type DW.ref.__gxx_personality_v0, @object
.size DW.ref.__gxx_personality_v0, 8
DW.ref.__gxx_personality_v0:
.quad __gxx_personality_v0
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cuda_runtime.h>
#include <iostream>
#include <vector>
#include <utility>
#include <stdio.h>
#include <math.h>
using namespace std;
#define K 3
#define BLCH 8
#define BLCW 32
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// declaration of constant memory where the fiter values are stored
__constant__ float cm[K*K];
__device__ void conv(const float* gm,
float* convolved,
int bh,
int bw,
int ih,
int iw,
int ch,
int cw,
int smH,
int smW,
int k,
float* sm,
int gID,
int tID,
int nT,
int rel_row,
int rel_col,
int nRows,
int stopPrefetchRowID,
int lastActiveThreadID) {
for(int i=k; i<=nRows; i++)
{
/*
----prefetch a pixel value from GM and store it in register----
all threads fetch the cell value immediately below to the current cell iteratively
last thread in the block would fetch k cells immediately below the current cell
boundary check would be needed for the blocks that act on the bottom most partition of the input image to prevent it from prefetching out of image values.
*/
float reg;
float regArr[K];
if(i <= stopPrefetchRowID){
reg = gm[i * iw + gID];
if(tID == lastActiveThreadID){
for(int j=1; j<=k-1; j++){
regArr[j] = gm[(i * iw) + gID + j];
}
}
}
// load k * k pixels above the current cell
float imgPixels[K*K];
for(int r=i-k; r<i; r++){
for(int c=0; c<k; c++){
/* translate the indices to [0,k] using r - (i-k) as imgPixels is of size k*k */
imgPixels[(r-i+k)*k + c] = sm[r * smW + tID + c];
}
}
/*multiply image pixel values with filter values (direct convolution) */
float convolvedCell = 0.0;
for(int c=0; c<k*k; c++){
convolvedCell += cm[c]*imgPixels[c];
}
//place the convolvedCell value into convolvedMatrix
int cID = ( ( (rel_row * bh) + (i-k) ) * cw )+( rel_col * nT )+tID;
if(cID < 0 || cID >= ch*cw ) {
printf("cID : %d, tID : %d, gID : %d\n", cID, tID, gID );
}
convolved[cID] = convolvedCell;
__syncthreads();
if(i <= stopPrefetchRowID){
sm[i * smW + tID] = reg;
if(tID == lastActiveThreadID){
for(int j=1; j<=k-1; j++){
int sID = i *smW + tID + j;
sm[sID] = regArr[j];
}
}
}
__syncthreads();
}
}
__global__ void conv_kernel(const float* gm,
float* convolved,
int bh,
int bw,
int ih,
int iw,
int ch,
int cw,
int smH,
int smW,
int k) {
int tID = threadIdx.x;
int bID = blockIdx.x;
int nT = blockDim.x;
int nB = gridDim.x;
int nBx = iw / nT;
//printf("num of blocks is %d\n", nB);
//printf("nB in a row is %d\n", nBx);
//check for right border or bottom border thread block
bool isBottomBorder = false;
bool isRightBorder = false;
// bottom border thread block
if(bID >= nB - nBx) {
//printf("bID : %d is bottom border\n", bID);
isBottomBorder = true;
}
// right border thread block
if((bID+1) % nBx == 0){
//printf("bID : %d is right border\n", bID);
isRightBorder = true;
}
// ---------------- Load k rows from GM into SM ----------------------
__shared__ float sm[ (BLCH + K - 1) * (BLCW + K - 1) ];
// rel_row and rel_col maps the Thread Block to appropriate position
int rel_row = bID / nBx;
int rel_col = bID % nBx;
// (rel_row * bh * iw) covers all the cells before row_ids bh, 2bh, 3bh ..
// gID finally maps threads to cells at rows 0, bh, 2bh, 3bh, ...
int gID = (rel_row * bh * iw) + (rel_col * nT) + tID;
for(int i=0; i<k; i++){
int sID = i * smW + tID;
sm[sID] = gm[i * iw + gID];
/* if last thread in the block, it should fetch additional k-1 pixels
in each row which are needed for computation of the convolution
*/
if(!isRightBorder && tID == nT-1){
for(int j=1; j<=k-1; j++){
sID = (i * smW) + tID + j;
sm[sID] = gm[i * iw + gID + j];
}
}
}
__syncthreads();
if( !isBottomBorder && !isRightBorder ){
int lastActiveThreadID = nT - 1;
int nRows = bh + k - 1;
int stopPrefetchRowID = nRows;
conv( gm, convolved, bh, bw,
ih, iw, ch, cw, smH, smW, k,
sm, gID, tID, nT, rel_row, rel_col,
nRows, stopPrefetchRowID, lastActiveThreadID );
}
else if( isBottomBorder && isRightBorder ){
/* make the last k-1 threads in the block to be idle. as there is no convolution needed for them */
if(tID < (nT - (k-1))){
int nRows = bh;
int stopPrefetchRowID = nRows - 1;
int lastActiveThreadID = nT - k;
conv( gm, convolved, bh, bw,
ih, iw, ch, cw, smH, smW, k,
sm, gID, tID, nT, rel_row, rel_col,
nRows, stopPrefetchRowID, lastActiveThreadID );
}
}
else if( isBottomBorder ){
int nRows = bh;
int stopPrefetchRowID = nRows-1;
int lastActiveThreadID = nT - 1;
conv( gm, convolved, bh, bw,
ih, iw, ch, cw, smH, smW, k,
sm, gID, tID, nT, rel_row, rel_col,
nRows, stopPrefetchRowID, lastActiveThreadID );
}
else if( isRightBorder ){
/* make the last k-1 threads in the block to be idle. as there is no convolution needed for them */
if(tID < (nT - (k-1))){
int nRows = bh + k - 1;
int stopPrefetchRowID = nRows;
int lastActiveThreadID = nT - k;
conv( gm, convolved, bh, bw,
ih, iw, ch, cw, smH, smW, k,
sm, gID, tID, nT, rel_row, rel_col,
nRows, stopPrefetchRowID, lastActiveThreadID );
}
}
}
int main(int argc, char **argv){
/* set values for image dimensions, block dimensions, filter size, stride ..
some of the constraints to keep in mind are
1. the value of k(filter size) should be less than blcH and blcW
2. stride value(s) should be 1
*/
int imgH = 2048;
int imgW = 2048;
int blcH = BLCH;
int blcW = BLCW;
int k = K;
int s = 1;
int nB = (imgH * imgW) / (blcH * blcW);
int nT = blcW;
int imgDims = imgH * imgW;
int imgSize = imgDims * sizeof(float);
// create host array that can hold pixel intensity values
float *h_img = new float[imgDims];
for(int i=0; i<imgDims; i++){
h_img[i] = 1.0;
}
// create device array that can hold pixel intensity values in GPU GM
float *d_img;
gpuErrchk(cudaMalloc((void **) &d_img, imgSize ));
gpuErrchk(cudaMemcpy(d_img, h_img, imgSize, cudaMemcpyHostToDevice));
// create filter and copy to constant memory
int filterDims = k * k;
int filterSize = filterDims * sizeof(float);
float *filter = new float[filterDims];
for(int i=0; i<filterDims; i++){
filter[i] = 0.5;
}
gpuErrchk(cudaMemcpyToSymbol(cm, filter, filterSize));
// create host and device array that holds the convoluted matrix
int convH = ( (imgH - k) / s ) + 1;
int convW = convH;
int convDims = convH * convW;
int convSize = convDims * sizeof(float);
float *h_convolved = new float[convDims];
for(int i=0; i<convDims; i++){
h_convolved[i] = 0;
}
float *d_convolved;
gpuErrchk(cudaMalloc((void **) &d_convolved, convSize));
gpuErrchk(cudaMemcpy(d_convolved, h_convolved,
convSize, cudaMemcpyHostToDevice));
// calculate shared memory dimensions
int smH = blcH + k - 1;
int smW = blcW + k - 1;
// call the kernel
conv_kernel<<<nB, nT>>>(d_img, d_convolved,
blcH, blcW,
imgH, imgW,
convH, convW,
smH, smW,
k);
gpuErrchk(cudaMemcpy(h_convolved, d_convolved,
convSize, cudaMemcpyDeviceToHost));
vector<pair<int,int> > miss;
for(int i=0; i<convH; i++){
for(int j=0; j<convW; j++){
//cout<<h_convolved[i*convW +j]<<" ";
if(h_convolved[i*convW +j] != 4.5){
miss.push_back(make_pair(i,j));
}
}
//cout<<"\n";
}
cout<<miss.size()<<"\n";
for(int i=0;i<miss.size();i++){
cout<<miss[i].first<<","<<miss[i].second<<"\n";
}
cudaDeviceReset();
delete h_img;
delete h_convolved;
return 0;
} | #include <hip/hip_runtime.h>
#include <iostream>
#include <vector>
#include <utility>
#include <stdio.h>
#include <math.h>
using namespace std;
#define K 3
#define BLCH 8
#define BLCW 32
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// declaration of constant memory where the fiter values are stored
__constant__ float cm[K*K];
__device__ void conv(const float* gm,
float* convolved,
int bh,
int bw,
int ih,
int iw,
int ch,
int cw,
int smH,
int smW,
int k,
float* sm,
int gID,
int tID,
int nT,
int rel_row,
int rel_col,
int nRows,
int stopPrefetchRowID,
int lastActiveThreadID) {
for(int i=k; i<=nRows; i++)
{
/*
----prefetch a pixel value from GM and store it in register----
all threads fetch the cell value immediately below to the current cell iteratively
last thread in the block would fetch k cells immediately below the current cell
boundary check would be needed for the blocks that act on the bottom most partition of the input image to prevent it from prefetching out of image values.
*/
float reg;
float regArr[K];
if(i <= stopPrefetchRowID){
reg = gm[i * iw + gID];
if(tID == lastActiveThreadID){
for(int j=1; j<=k-1; j++){
regArr[j] = gm[(i * iw) + gID + j];
}
}
}
// load k * k pixels above the current cell
float imgPixels[K*K];
for(int r=i-k; r<i; r++){
for(int c=0; c<k; c++){
/* translate the indices to [0,k] using r - (i-k) as imgPixels is of size k*k */
imgPixels[(r-i+k)*k + c] = sm[r * smW + tID + c];
}
}
/*multiply image pixel values with filter values (direct convolution) */
float convolvedCell = 0.0;
for(int c=0; c<k*k; c++){
convolvedCell += cm[c]*imgPixels[c];
}
//place the convolvedCell value into convolvedMatrix
int cID = ( ( (rel_row * bh) + (i-k) ) * cw )+( rel_col * nT )+tID;
if(cID < 0 || cID >= ch*cw ) {
printf("cID : %d, tID : %d, gID : %d\n", cID, tID, gID );
}
convolved[cID] = convolvedCell;
__syncthreads();
if(i <= stopPrefetchRowID){
sm[i * smW + tID] = reg;
if(tID == lastActiveThreadID){
for(int j=1; j<=k-1; j++){
int sID = i *smW + tID + j;
sm[sID] = regArr[j];
}
}
}
__syncthreads();
}
}
__global__ void conv_kernel(const float* gm,
float* convolved,
int bh,
int bw,
int ih,
int iw,
int ch,
int cw,
int smH,
int smW,
int k) {
int tID = threadIdx.x;
int bID = blockIdx.x;
int nT = blockDim.x;
int nB = gridDim.x;
int nBx = iw / nT;
//printf("num of blocks is %d\n", nB);
//printf("nB in a row is %d\n", nBx);
//check for right border or bottom border thread block
bool isBottomBorder = false;
bool isRightBorder = false;
// bottom border thread block
if(bID >= nB - nBx) {
//printf("bID : %d is bottom border\n", bID);
isBottomBorder = true;
}
// right border thread block
if((bID+1) % nBx == 0){
//printf("bID : %d is right border\n", bID);
isRightBorder = true;
}
// ---------------- Load k rows from GM into SM ----------------------
__shared__ float sm[ (BLCH + K - 1) * (BLCW + K - 1) ];
// rel_row and rel_col maps the Thread Block to appropriate position
int rel_row = bID / nBx;
int rel_col = bID % nBx;
// (rel_row * bh * iw) covers all the cells before row_ids bh, 2bh, 3bh ..
// gID finally maps threads to cells at rows 0, bh, 2bh, 3bh, ...
int gID = (rel_row * bh * iw) + (rel_col * nT) + tID;
for(int i=0; i<k; i++){
int sID = i * smW + tID;
sm[sID] = gm[i * iw + gID];
/* if last thread in the block, it should fetch additional k-1 pixels
in each row which are needed for computation of the convolution
*/
if(!isRightBorder && tID == nT-1){
for(int j=1; j<=k-1; j++){
sID = (i * smW) + tID + j;
sm[sID] = gm[i * iw + gID + j];
}
}
}
__syncthreads();
if( !isBottomBorder && !isRightBorder ){
int lastActiveThreadID = nT - 1;
int nRows = bh + k - 1;
int stopPrefetchRowID = nRows;
conv( gm, convolved, bh, bw,
ih, iw, ch, cw, smH, smW, k,
sm, gID, tID, nT, rel_row, rel_col,
nRows, stopPrefetchRowID, lastActiveThreadID );
}
else if( isBottomBorder && isRightBorder ){
/* make the last k-1 threads in the block to be idle. as there is no convolution needed for them */
if(tID < (nT - (k-1))){
int nRows = bh;
int stopPrefetchRowID = nRows - 1;
int lastActiveThreadID = nT - k;
conv( gm, convolved, bh, bw,
ih, iw, ch, cw, smH, smW, k,
sm, gID, tID, nT, rel_row, rel_col,
nRows, stopPrefetchRowID, lastActiveThreadID );
}
}
else if( isBottomBorder ){
int nRows = bh;
int stopPrefetchRowID = nRows-1;
int lastActiveThreadID = nT - 1;
conv( gm, convolved, bh, bw,
ih, iw, ch, cw, smH, smW, k,
sm, gID, tID, nT, rel_row, rel_col,
nRows, stopPrefetchRowID, lastActiveThreadID );
}
else if( isRightBorder ){
/* make the last k-1 threads in the block to be idle. as there is no convolution needed for them */
if(tID < (nT - (k-1))){
int nRows = bh + k - 1;
int stopPrefetchRowID = nRows;
int lastActiveThreadID = nT - k;
conv( gm, convolved, bh, bw,
ih, iw, ch, cw, smH, smW, k,
sm, gID, tID, nT, rel_row, rel_col,
nRows, stopPrefetchRowID, lastActiveThreadID );
}
}
}
int main(int argc, char **argv){
/* set values for image dimensions, block dimensions, filter size, stride ..
some of the constraints to keep in mind are
1. the value of k(filter size) should be less than blcH and blcW
2. stride value(s) should be 1
*/
int imgH = 2048;
int imgW = 2048;
int blcH = BLCH;
int blcW = BLCW;
int k = K;
int s = 1;
int nB = (imgH * imgW) / (blcH * blcW);
int nT = blcW;
int imgDims = imgH * imgW;
int imgSize = imgDims * sizeof(float);
// create host array that can hold pixel intensity values
float *h_img = new float[imgDims];
for(int i=0; i<imgDims; i++){
h_img[i] = 1.0;
}
// create device array that can hold pixel intensity values in GPU GM
float *d_img;
gpuErrchk(hipMalloc((void **) &d_img, imgSize ));
gpuErrchk(hipMemcpy(d_img, h_img, imgSize, hipMemcpyHostToDevice));
// create filter and copy to constant memory
int filterDims = k * k;
int filterSize = filterDims * sizeof(float);
float *filter = new float[filterDims];
for(int i=0; i<filterDims; i++){
filter[i] = 0.5;
}
gpuErrchk(hipMemcpyToSymbol(HIP_SYMBOL(cm), filter, filterSize));
// create host and device array that holds the convoluted matrix
int convH = ( (imgH - k) / s ) + 1;
int convW = convH;
int convDims = convH * convW;
int convSize = convDims * sizeof(float);
float *h_convolved = new float[convDims];
for(int i=0; i<convDims; i++){
h_convolved[i] = 0;
}
float *d_convolved;
gpuErrchk(hipMalloc((void **) &d_convolved, convSize));
gpuErrchk(hipMemcpy(d_convolved, h_convolved,
convSize, hipMemcpyHostToDevice));
// calculate shared memory dimensions
int smH = blcH + k - 1;
int smW = blcW + k - 1;
// call the kernel
conv_kernel<<<nB, nT>>>(d_img, d_convolved,
blcH, blcW,
imgH, imgW,
convH, convW,
smH, smW,
k);
gpuErrchk(hipMemcpy(h_convolved, d_convolved,
convSize, hipMemcpyDeviceToHost));
vector<pair<int,int> > miss;
for(int i=0; i<convH; i++){
for(int j=0; j<convW; j++){
//cout<<h_convolved[i*convW +j]<<" ";
if(h_convolved[i*convW +j] != 4.5){
miss.push_back(make_pair(i,j));
}
}
//cout<<"\n";
}
cout<<miss.size()<<"\n";
for(int i=0;i<miss.size();i++){
cout<<miss[i].first<<","<<miss[i].second<<"\n";
}
hipDeviceReset();
delete h_img;
delete h_convolved;
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <iostream>
#include <vector>
#include <utility>
#include <stdio.h>
#include <math.h>
using namespace std;
#define K 3
#define BLCH 8
#define BLCW 32
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
/*
 * Report a failed HIP runtime call.
 * code  - status returned by the HIP API call (checked against hipSuccess)
 * file  - source file of the call site (supplied by the gpuErrchk macro)
 * line  - line number of the call site
 * abort - when true (default), terminate the process with the HIP error code
 */
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
    // Fast path: nothing to do when the call succeeded.
    if (code == hipSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// declaration of constant memory where the fiter values are stored
__constant__ float cm[K*K];
/*
 * Device-side convolution worker: produces one output row per loop iteration
 * by applying the k x k filter held in constant memory `cm` to the rows of
 * the shared-memory tile `sm`, while prefetching the next image row from
 * global memory into registers (overlapping the GM load with the compute).
 *
 * gm                 - input image in global memory (read-only here)
 * convolved          - output matrix in global memory (ch x cw)
 * bh, bw             - tile height/width this block is responsible for
 * ih, iw             - full image height/width (iw is the GM row stride)
 * ch, cw             - convolved-output height/width
 * smH, smW           - shared tile dims (smH is not read in this body)
 * k                  - filter side length
 * sm                 - shared-memory tile, row stride smW
 * gID                - this thread's column position in global memory
 * tID, nT            - thread index and threads per block
 * rel_row, rel_col   - this block's tile coordinates in the image grid
 * nRows              - number of row iterations (loop runs i = k..nRows)
 * stopPrefetchRowID  - last row index for which a GM prefetch is performed
 *                      (callers shrink this for bottom-border blocks)
 * lastActiveThreadID - thread that also fetches the extra k-1 halo columns
 *
 * NOTE(review): this function contains __syncthreads(), but some callers in
 * conv_kernel invoke it under `if(tID < nT-(k-1))`, so the barrier is not
 * reached by every thread of the block in those branches — confirm intended.
 */
__device__ void conv(const float* gm,
float* convolved,
int bh,
int bw,
int ih,
int iw,
int ch,
int cw,
int smH,
int smW,
int k,
float* sm,
int gID,
int tID,
int nT,
int rel_row,
int rel_col,
int nRows,
int stopPrefetchRowID,
int lastActiveThreadID) {
for(int i=k; i<=nRows; i++)
{
/*
----prefetch a pixel value from GM and store it in register----
all threads fetch the cell value immediately below to the current cell iteratively
last thread in the block would fetch k cells immediately below the current cell
boundary check would be needed for the blocks that act on the bottom most partition of the input image to prevent it from prefetching out of image values.
*/
float reg;
float regArr[K];
if(i <= stopPrefetchRowID){
reg = gm[i * iw + gID];
if(tID == lastActiveThreadID){
// halo columns: fetch the k-1 pixels to the right of this thread's column
for(int j=1; j<=k-1; j++){
regArr[j] = gm[(i * iw) + gID + j];
}
}
}
// load k * k pixels above the current cell
float imgPixels[K*K];
for(int r=i-k; r<i; r++){
for(int c=0; c<k; c++){
/* translate the indices to [0,k] using r - (i-k) as imgPixels is of size k*k */
imgPixels[(r-i+k)*k + c] = sm[r * smW + tID + c];
}
}
/*multiply image pixel values with filter values (direct convolution) */
float convolvedCell = 0.0;
for(int c=0; c<k*k; c++){
convolvedCell += cm[c]*imgPixels[c];
}
//place the convolvedCell value into convolvedMatrix
// cID = (global output row) * cw + (global output column)
int cID = ( ( (rel_row * bh) + (i-k) ) * cw )+( rel_col * nT )+tID;
if(cID < 0 || cID >= ch*cw ) {
// debug trap: out-of-range output index (should never fire)
printf("cID : %d, tID : %d, gID : %d\n", cID, tID, gID );
}
convolved[cID] = convolvedCell;
// barrier: everyone must be done READING row i's tile before it is overwritten
__syncthreads();
if(i <= stopPrefetchRowID){
// publish the prefetched GM row into the shared tile for the next iteration
sm[i * smW + tID] = reg;
if(tID == lastActiveThreadID){
for(int j=1; j<=k-1; j++){
int sID = i *smW + tID + j;
sm[sID] = regArr[j];
}
}
}
// barrier: the newly written row must be visible before the next iteration reads it
__syncthreads();
}
}
/*
 * Convolution kernel: each block convolves one bh x bw tile of the image.
 * Launch layout: 1-D grid of 1-D blocks; the static shared tile is sized
 * with the compile-time constants BLCH/BLCW/K, so blockDim.x is expected to
 * equal BLCW and bh/bw to equal BLCH/BLCW (the host launches nT = blcW —
 * see main). nBx = iw / nT blocks cover one row of tiles.
 *
 * The kernel seeds the shared tile with the first k image rows, then calls
 * conv() with border-dependent bounds:
 *   - bottom-border blocks stop prefetching one row early,
 *   - right-border blocks idle their last k-1 threads (no halo available).
 *
 * NOTE(review): in the right-border branches conv() — which contains
 * __syncthreads() — is entered only by threads with tID < nT-(k-1); the
 * barrier is therefore divergent for those blocks. Confirm this is safe on
 * the targeted hardware.
 */
__global__ void conv_kernel(const float* gm,
float* convolved,
int bh,
int bw,
int ih,
int iw,
int ch,
int cw,
int smH,
int smW,
int k) {
int tID = threadIdx.x;
int bID = blockIdx.x;
int nT = blockDim.x;
int nB = gridDim.x;
// number of blocks needed to cover one row of the image
int nBx = iw / nT;
//printf("num of blocks is %d\n", nB);
//printf("nB in a row is %d\n", nBx);
//check for right border or bottom border thread block
bool isBottomBorder = false;
bool isRightBorder = false;
// bottom border thread block
if(bID >= nB - nBx) {
//printf("bID : %d is bottom border\n", bID);
isBottomBorder = true;
}
// right border thread block
if((bID+1) % nBx == 0){
//printf("bID : %d is right border\n", bID);
isRightBorder = true;
}
// ---------------- Load k rows from GM into SM ----------------------
// tile includes a (K-1)-wide halo on the right and bottom
__shared__ float sm[ (BLCH + K - 1) * (BLCW + K - 1) ];
// rel_row and rel_col maps the Thread Block to appropriate position
int rel_row = bID / nBx;
int rel_col = bID % nBx;
// (rel_row * bh * iw) covers all the cells before row_ids bh, 2bh, 3bh ..
// gID finally maps threads to cells at rows 0, bh, 2bh, 3bh, ...
int gID = (rel_row * bh * iw) + (rel_col * nT) + tID;
for(int i=0; i<k; i++){
int sID = i * smW + tID;
sm[sID] = gm[i * iw + gID];
/* if last thread in the block, it should fetch additional k-1 pixels
in each row which are needed for computation of the convolution
*/
if(!isRightBorder && tID == nT-1){
for(int j=1; j<=k-1; j++){
sID = (i * smW) + tID + j;
sm[sID] = gm[i * iw + gID + j];
}
}
}
// all k seed rows must be resident in shared memory before convolving
__syncthreads();
if( !isBottomBorder && !isRightBorder ){
// interior block: full halo available below and to the right
int lastActiveThreadID = nT - 1;
int nRows = bh + k - 1;
int stopPrefetchRowID = nRows;
conv( gm, convolved, bh, bw,
ih, iw, ch, cw, smH, smW, k,
sm, gID, tID, nT, rel_row, rel_col,
nRows, stopPrefetchRowID, lastActiveThreadID );
}
else if( isBottomBorder && isRightBorder ){
/* make the last k-1 threads in the block to be idle. as there is no convolution needed for them */
if(tID < (nT - (k-1))){
// bottom-right corner: fewer rows AND no right halo
int nRows = bh;
int stopPrefetchRowID = nRows - 1;
int lastActiveThreadID = nT - k;
conv( gm, convolved, bh, bw,
ih, iw, ch, cw, smH, smW, k,
sm, gID, tID, nT, rel_row, rel_col,
nRows, stopPrefetchRowID, lastActiveThreadID );
}
}
else if( isBottomBorder ){
// bottom border: stop prefetching before running off the image
int nRows = bh;
int stopPrefetchRowID = nRows-1;
int lastActiveThreadID = nT - 1;
conv( gm, convolved, bh, bw,
ih, iw, ch, cw, smH, smW, k,
sm, gID, tID, nT, rel_row, rel_col,
nRows, stopPrefetchRowID, lastActiveThreadID );
}
else if( isRightBorder ){
/* make the last k-1 threads in the block to be idle. as there is no convolution needed for them */
if(tID < (nT - (k-1))){
// right border: full row count but no right halo
int nRows = bh + k - 1;
int stopPrefetchRowID = nRows;
int lastActiveThreadID = nT - k;
conv( gm, convolved, bh, bw,
ih, iw, ch, cw, smH, smW, k,
sm, gID, tID, nT, rel_row, rel_col,
nRows, stopPrefetchRowID, lastActiveThreadID );
}
}
}
int main(int argc, char **argv){
/* set values for image dimensions, block dimensions, filter size, stride ..
some of the constraints to keep in mind are
1. the value of k(filter size) should be less than blcH and blcW
2. stride value(s) should be 1
*/
int imgH = 2048;
int imgW = 2048;
int blcH = BLCH;
int blcW = BLCW;
int k = K;
int s = 1;
int nB = (imgH * imgW) / (blcH * blcW);
int nT = blcW;
int imgDims = imgH * imgW;
int imgSize = imgDims * sizeof(float);
// create host array that can hold pixel intensity values
float *h_img = new float[imgDims];
for(int i=0; i<imgDims; i++){
h_img[i] = 1.0;
}
// create device array that can hold pixel intensity values in GPU GM
float *d_img;
gpuErrchk(hipMalloc((void **) &d_img, imgSize ));
gpuErrchk(hipMemcpy(d_img, h_img, imgSize, hipMemcpyHostToDevice));
// create filter and copy to constant memory
int filterDims = k * k;
int filterSize = filterDims * sizeof(float);
float *filter = new float[filterDims];
for(int i=0; i<filterDims; i++){
filter[i] = 0.5;
}
gpuErrchk(hipMemcpyToSymbol(HIP_SYMBOL(cm), filter, filterSize));
// create host and device array that holds the convoluted matrix
int convH = ( (imgH - k) / s ) + 1;
int convW = convH;
int convDims = convH * convW;
int convSize = convDims * sizeof(float);
float *h_convolved = new float[convDims];
for(int i=0; i<convDims; i++){
h_convolved[i] = 0;
}
float *d_convolved;
gpuErrchk(hipMalloc((void **) &d_convolved, convSize));
gpuErrchk(hipMemcpy(d_convolved, h_convolved,
convSize, hipMemcpyHostToDevice));
// calculate shared memory dimensions
int smH = blcH + k - 1;
int smW = blcW + k - 1;
// call the kernel
conv_kernel<<<nB, nT>>>(d_img, d_convolved,
blcH, blcW,
imgH, imgW,
convH, convW,
smH, smW,
k);
gpuErrchk(hipMemcpy(h_convolved, d_convolved,
convSize, hipMemcpyDeviceToHost));
vector<pair<int,int> > miss;
for(int i=0; i<convH; i++){
for(int j=0; j<convW; j++){
//cout<<h_convolved[i*convW +j]<<" ";
if(h_convolved[i*convW +j] != 4.5){
miss.push_back(make_pair(i,j));
}
}
//cout<<"\n";
}
cout<<miss.size()<<"\n";
for(int i=0;i<miss.size();i++){
cout<<miss[i].first<<","<<miss[i].second<<"\n";
}
hipDeviceReset();
delete h_img;
delete h_convolved;
return 0;
} | .text
.file "convolution.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z26__device_stub__conv_kernelPKfPfiiiiiiiii # -- Begin function _Z26__device_stub__conv_kernelPKfPfiiiiiiiii
.p2align 4, 0x90
.type _Z26__device_stub__conv_kernelPKfPfiiiiiiiii,@function
_Z26__device_stub__conv_kernelPKfPfiiiiiiiii: # @_Z26__device_stub__conv_kernelPKfPfiiiiiiiii
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movl %r9d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
leaq 176(%rsp), %rax
movq %rax, 128(%rsp)
leaq 184(%rsp), %rax
movq %rax, 136(%rsp)
leaq 192(%rsp), %rax
movq %rax, 144(%rsp)
leaq 200(%rsp), %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z11conv_kernelPKfPfiiiiiiiii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end0:
.size _Z26__device_stub__conv_kernelPKfPfiiiiiiiii, .Lfunc_end0-_Z26__device_stub__conv_kernelPKfPfiiiiiiiii
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI1_0:
.long 0x40900000 # float 4.5
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.Lfunc_begin0:
.cfi_startproc
.cfi_personality 3, __gxx_personality_v0
.cfi_lsda 3, .Lexception0
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $248, %rsp
.cfi_def_cfa_offset 304
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
.cfi_escape 0x2e, 0x00
movl $16777216, %edi # imm = 0x1000000
callq _Znam
movq %rax, %r15
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl $1065353216, (%r15,%rax,4) # imm = 0x3F800000
incq %rax
cmpq $4194304, %rax # imm = 0x400000
jne .LBB1_1
# %bb.2:
.cfi_escape 0x2e, 0x00
leaq 64(%rsp), %rdi
movl $16777216, %esi # imm = 0x1000000
callq hipMalloc
testl %eax, %eax
jne .LBB1_3
# %bb.5: # %_Z9gpuAssert10hipError_tPKcib.exit
movq 64(%rsp), %rdi
.cfi_escape 0x2e, 0x00
movl $16777216, %edx # imm = 0x1000000
movq %r15, %rsi
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_6
# %bb.9: # %_Z9gpuAssert10hipError_tPKcib.exit85
.cfi_escape 0x2e, 0x00
movl $36, %edi
callq _Znam
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB1_10: # =>This Inner Loop Header: Depth=1
movl $1056964608, (%rax,%rcx,4) # imm = 0x3F000000
incq %rcx
cmpq $9, %rcx
jne .LBB1_10
# %bb.7:
.cfi_escape 0x2e, 0x00
movl $cm, %edi
movl $36, %edx
movq %rax, %rsi
xorl %ecx, %ecx
movl $1, %r8d
callq hipMemcpyToSymbol
testl %eax, %eax
jne .LBB1_8
# %bb.11: # %_Z9gpuAssert10hipError_tPKcib.exit87
.cfi_escape 0x2e, 0x00
movl $16744464, %edi # imm = 0xFF8010
callq _Znam
movq %rax, %rbx
.cfi_escape 0x2e, 0x00
movl $16744464, %edx # imm = 0xFF8010
movq %rax, %rdi
xorl %esi, %esi
callq memset@PLT
.cfi_escape 0x2e, 0x00
leaq 8(%rsp), %rdi
movl $16744464, %esi # imm = 0xFF8010
callq hipMalloc
testl %eax, %eax
jne .LBB1_12
# %bb.13: # %_Z9gpuAssert10hipError_tPKcib.exit89
movq 8(%rsp), %rdi
.cfi_escape 0x2e, 0x00
movl $16744464, %edx # imm = 0xFF8010
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_14
# %bb.15: # %_Z9gpuAssert10hipError_tPKcib.exit91
movabsq $4294967328, %rdx # imm = 0x100000020
leaq 16352(%rdx), %rdi
.cfi_escape 0x2e, 0x00
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_17
# %bb.16:
movq 64(%rsp), %rax
movq 8(%rsp), %rcx
movq %rax, 152(%rsp)
movq %rcx, 144(%rsp)
movl $8, 52(%rsp)
movl $32, 48(%rsp)
movl $2048, 44(%rsp) # imm = 0x800
movl $2048, 40(%rsp) # imm = 0x800
movl $2046, 36(%rsp) # imm = 0x7FE
movl $2046, 32(%rsp) # imm = 0x7FE
movl $10, 28(%rsp)
movl $34, 24(%rsp)
movl $3, 20(%rsp)
leaq 152(%rsp), %rax
movq %rax, 160(%rsp)
leaq 144(%rsp), %rax
movq %rax, 168(%rsp)
leaq 52(%rsp), %rax
movq %rax, 176(%rsp)
leaq 48(%rsp), %rax
movq %rax, 184(%rsp)
leaq 44(%rsp), %rax
movq %rax, 192(%rsp)
leaq 40(%rsp), %rax
movq %rax, 200(%rsp)
leaq 36(%rsp), %rax
movq %rax, 208(%rsp)
leaq 32(%rsp), %rax
movq %rax, 216(%rsp)
leaq 28(%rsp), %rax
movq %rax, 224(%rsp)
leaq 24(%rsp), %rax
movq %rax, 232(%rsp)
leaq 20(%rsp), %rax
movq %rax, 240(%rsp)
.cfi_escape 0x2e, 0x00
leaq 128(%rsp), %rdi
leaq 112(%rsp), %rsi
leaq 104(%rsp), %rdx
leaq 96(%rsp), %rcx
callq __hipPopCallConfiguration
movq 128(%rsp), %rsi
movl 136(%rsp), %edx
movq 112(%rsp), %rcx
movl 120(%rsp), %r8d
.cfi_escape 0x2e, 0x10
leaq 160(%rsp), %r9
movl $_Z11conv_kernelPKfPfiiiiiiiii, %edi
pushq 96(%rsp)
.cfi_adjust_cfa_offset 8
pushq 112(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_17:
movq 8(%rsp), %rsi
.cfi_escape 0x2e, 0x00
movl $16744464, %edx # imm = 0xFF8010
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_59
# %bb.18: # %.preheader.preheader
xorl %ecx, %ecx
movss .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
xorl %eax, %eax
movq %rax, (%rsp) # 8-byte Spill
xorl %r12d, %r12d
xorl %eax, %eax
movq %rbx, 56(%rsp) # 8-byte Spill
movq %r15, 72(%rsp) # 8-byte Spill
jmp .LBB1_19
.p2align 4, 0x90
.LBB1_24: # %_Z9gpuAssert10hipError_tPKcib.exit93
# in Loop: Header=BB1_19 Depth=1
incq %rcx
cmpq $2046, %rcx # imm = 0x7FE
je .LBB1_25
.LBB1_19: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_20 Depth 2
# Child Loop BB1_47 Depth 3
imulq $8184, %rcx, %rdx # imm = 0x1FF8
addq %rbx, %rdx
xorl %r14d, %r14d
movq %rcx, 88(%rsp) # 8-byte Spill
movq %rdx, 80(%rsp) # 8-byte Spill
jmp .LBB1_20
.p2align 4, 0x90
.LBB1_22: # in Loop: Header=BB1_20 Depth=2
movq %rbp, (%r12)
addq $8, %r12
.LBB1_23: # %_ZNSt6vectorISt4pairIiiESaIS1_EE9push_backEOS1_.exit
# in Loop: Header=BB1_20 Depth=2
incq %r14
cmpq $2046, %r14 # imm = 0x7FE
je .LBB1_24
.LBB1_20: # Parent Loop BB1_19 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB1_47 Depth 3
movss (%rdx,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
ucomiss %xmm1, %xmm0
jne .LBB1_21
jnp .LBB1_23
.LBB1_21: # in Loop: Header=BB1_20 Depth=2
movq %r14, %rbp
shlq $32, %rbp
orq %rcx, %rbp
cmpq %rax, %r12
jne .LBB1_22
# %bb.38: # in Loop: Header=BB1_20 Depth=2
movq %r12, %r13
subq (%rsp), %r13 # 8-byte Folded Reload
movabsq $9223372036854775800, %rax # imm = 0x7FFFFFFFFFFFFFF8
cmpq %rax, %r13
je .LBB1_39
# %bb.41: # %_ZNKSt6vectorISt4pairIiiESaIS1_EE12_M_check_lenEmPKc.exit.i
# in Loop: Header=BB1_20 Depth=2
sarq $3, %r13
cmpq $1, %r13
movq %r13, %rax
adcq $0, %rax
leaq (%rax,%r13), %r15
movabsq $1152921504606846975, %rcx # imm = 0xFFFFFFFFFFFFFFF
cmpq %rcx, %r15
cmovaeq %rcx, %r15
addq %r13, %rax
cmovbq %rcx, %r15
testq %r15, %r15
je .LBB1_42
# %bb.43: # in Loop: Header=BB1_20 Depth=2
leaq (,%r15,8), %rdi
.Ltmp0:
.cfi_escape 0x2e, 0x00
callq _Znwm
.Ltmp1:
# %bb.44: # in Loop: Header=BB1_20 Depth=2
movq %rax, %rbx
movss .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
jmp .LBB1_45
.LBB1_42: # in Loop: Header=BB1_20 Depth=2
xorl %ebx, %ebx
.LBB1_45: # %_ZNSt12_Vector_baseISt4pairIiiESaIS1_EE11_M_allocateEm.exit.i
# in Loop: Header=BB1_20 Depth=2
movq %rbp, (%rbx,%r13,8)
movq %rbx, %r13
cmpq %r12, (%rsp) # 8-byte Folded Reload
je .LBB1_48
# %bb.46: # %.lr.ph.i.i.i.i.preheader
# in Loop: Header=BB1_20 Depth=2
movq %rbx, %r13
movq (%rsp), %rax # 8-byte Reload
.p2align 4, 0x90
.LBB1_47: # %.lr.ph.i.i.i.i
# Parent Loop BB1_19 Depth=1
# Parent Loop BB1_20 Depth=2
# => This Inner Loop Header: Depth=3
movq (%rax), %rcx
movq %rcx, (%r13)
addq $8, %rax
addq $8, %r13
cmpq %r12, %rax
jne .LBB1_47
.LBB1_48: # %_ZNSt6vectorISt4pairIiiESaIS1_EE11_S_relocateEPS1_S4_S4_RS2_.exit.i
# in Loop: Header=BB1_20 Depth=2
movq (%rsp), %rdi # 8-byte Reload
testq %rdi, %rdi
je .LBB1_50
# %bb.49: # in Loop: Header=BB1_20 Depth=2
.cfi_escape 0x2e, 0x00
callq _ZdlPv
movss .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
.LBB1_50: # %_ZNSt6vectorISt4pairIiiESaIS1_EE17_M_realloc_insertIJS1_EEEvN9__gnu_cxx17__normal_iteratorIPS1_S3_EEDpOT_.exit
# in Loop: Header=BB1_20 Depth=2
addq $8, %r13
leaq (%rbx,%r15,8), %rax
movq %r13, %r12
movq %rbx, (%rsp) # 8-byte Spill
movq 72(%rsp), %r15 # 8-byte Reload
movq 56(%rsp), %rbx # 8-byte Reload
movq 88(%rsp), %rcx # 8-byte Reload
movq 80(%rsp), %rdx # 8-byte Reload
jmp .LBB1_23
.LBB1_25:
movq %r12, %r14
subq (%rsp), %r14 # 8-byte Folded Reload
sarq $3, %r14
.Ltmp3:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
movq %r14, %rsi
callq _ZNSo9_M_insertImEERSoT_
.Ltmp4:
# %bb.26: # %_ZNSolsEm.exit
.Ltmp5:
.cfi_escape 0x2e, 0x00
movl $.L.str.1, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp6:
# %bb.27: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit.preheader
movq (%rsp), %rax # 8-byte Reload
cmpq %rax, %r12
je .LBB1_34
# %bb.28: # %.lr.ph.preheader
movq %rax, %r12
cmpq $1, %r14
adcq $0, %r14
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_29: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl (%r12,%rbx,8), %esi
.Ltmp7:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
callq _ZNSolsEi
.Ltmp8:
# %bb.30: # in Loop: Header=BB1_29 Depth=1
.Ltmp9:
movq %rax, %r13
.cfi_escape 0x2e, 0x00
movl $.L.str.2, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp10:
# %bb.31: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit96
# in Loop: Header=BB1_29 Depth=1
movl 4(%r12,%rbx,8), %esi
.Ltmp11:
.cfi_escape 0x2e, 0x00
movq %r13, %rdi
callq _ZNSolsEi
.Ltmp12:
# %bb.32: # in Loop: Header=BB1_29 Depth=1
.Ltmp13:
.cfi_escape 0x2e, 0x00
movl $.L.str.1, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp14:
# %bb.33: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit98
# in Loop: Header=BB1_29 Depth=1
incq %rbx
cmpq %rbx, %r14
jne .LBB1_29
.LBB1_34: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit._crit_edge
.Ltmp16:
.cfi_escape 0x2e, 0x00
callq hipDeviceReset
.Ltmp17:
# %bb.35:
.cfi_escape 0x2e, 0x00
movq %r15, %rdi
callq _ZdlPv
.cfi_escape 0x2e, 0x00
movq 56(%rsp), %rdi # 8-byte Reload
callq _ZdlPv
movq (%rsp), %rdi # 8-byte Reload
testq %rdi, %rdi
je .LBB1_37
# %bb.36:
.cfi_escape 0x2e, 0x00
callq _ZdlPv
.LBB1_37: # %_ZNSt6vectorISt4pairIiiESaIS1_EED2Ev.exit
xorl %eax, %eax
addq $248, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_39:
.cfi_def_cfa_offset 304
.Ltmp19:
.cfi_escape 0x2e, 0x00
movl $.L.str.4, %edi
callq _ZSt20__throw_length_errorPKc
.Ltmp20:
# %bb.40: # %.noexc102
.LBB1_3:
movq stderr(%rip), %rbx
.cfi_escape 0x2e, 0x00
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
.cfi_escape 0x2e, 0x00
movl $.L.str.3, %esi
movl $.L.str, %ecx
movq %rbx, %rdi
movq %rax, %rdx
movl $233, %r8d
jmp .LBB1_4
.LBB1_6:
movq stderr(%rip), %rbx
.cfi_escape 0x2e, 0x00
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
.cfi_escape 0x2e, 0x00
movl $.L.str.3, %esi
movl $.L.str, %ecx
movq %rbx, %rdi
movq %rax, %rdx
movl $234, %r8d
jmp .LBB1_4
.LBB1_8:
movq stderr(%rip), %rbx
.cfi_escape 0x2e, 0x00
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
.cfi_escape 0x2e, 0x00
movl $.L.str.3, %esi
movl $.L.str, %ecx
movq %rbx, %rdi
movq %rax, %rdx
movl $242, %r8d
jmp .LBB1_4
.LBB1_12:
movq stderr(%rip), %rbx
.cfi_escape 0x2e, 0x00
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
.cfi_escape 0x2e, 0x00
movl $.L.str.3, %esi
movl $.L.str, %ecx
movq %rbx, %rdi
movq %rax, %rdx
movl $253, %r8d
jmp .LBB1_4
.LBB1_14:
movq stderr(%rip), %rbx
.cfi_escape 0x2e, 0x00
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
.cfi_escape 0x2e, 0x00
movl $.L.str.3, %esi
movl $.L.str, %ecx
movq %rbx, %rdi
movq %rax, %rdx
movl $255, %r8d
jmp .LBB1_4
.LBB1_59:
movq stderr(%rip), %rbx
.cfi_escape 0x2e, 0x00
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
.cfi_escape 0x2e, 0x00
movl $.L.str.3, %esi
movl $.L.str, %ecx
movq %rbx, %rdi
movq %rax, %rdx
movl $267, %r8d # imm = 0x10B
.LBB1_4:
xorl %eax, %eax
callq fprintf
.cfi_escape 0x2e, 0x00
movl %ebp, %edi
callq exit
.LBB1_54:
.Ltmp18:
jmp .LBB1_52
.LBB1_55:
.Ltmp15:
movq %rax, %rbx
jmp .LBB1_56
.LBB1_51: # %.loopexit
.Ltmp2:
jmp .LBB1_52
.LBB1_53: # %.loopexit.split-lp
.Ltmp21:
.LBB1_52:
movq %rax, %rbx
movq (%rsp), %r12 # 8-byte Reload
.LBB1_56:
testq %r12, %r12
je .LBB1_58
# %bb.57:
.cfi_escape 0x2e, 0x00
movq %r12, %rdi
callq _ZdlPv
.LBB1_58: # %_ZNSt6vectorISt4pairIiiESaIS1_EED2Ev.exit100
.cfi_escape 0x2e, 0x00
movq %rbx, %rdi
callq _Unwind_Resume@PLT
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
.section .gcc_except_table,"a",@progbits
.p2align 2, 0x0
GCC_except_table1:
.Lexception0:
.byte 255 # @LPStart Encoding = omit
.byte 255 # @TType Encoding = omit
.byte 1 # Call site Encoding = uleb128
.uleb128 .Lcst_end0-.Lcst_begin0
.Lcst_begin0:
.uleb128 .Lfunc_begin0-.Lfunc_begin0 # >> Call Site 1 <<
.uleb128 .Ltmp0-.Lfunc_begin0 # Call between .Lfunc_begin0 and .Ltmp0
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp0-.Lfunc_begin0 # >> Call Site 2 <<
.uleb128 .Ltmp1-.Ltmp0 # Call between .Ltmp0 and .Ltmp1
.uleb128 .Ltmp2-.Lfunc_begin0 # jumps to .Ltmp2
.byte 0 # On action: cleanup
.uleb128 .Ltmp3-.Lfunc_begin0 # >> Call Site 3 <<
.uleb128 .Ltmp6-.Ltmp3 # Call between .Ltmp3 and .Ltmp6
.uleb128 .Ltmp18-.Lfunc_begin0 # jumps to .Ltmp18
.byte 0 # On action: cleanup
.uleb128 .Ltmp7-.Lfunc_begin0 # >> Call Site 4 <<
.uleb128 .Ltmp14-.Ltmp7 # Call between .Ltmp7 and .Ltmp14
.uleb128 .Ltmp15-.Lfunc_begin0 # jumps to .Ltmp15
.byte 0 # On action: cleanup
.uleb128 .Ltmp16-.Lfunc_begin0 # >> Call Site 5 <<
.uleb128 .Ltmp17-.Ltmp16 # Call between .Ltmp16 and .Ltmp17
.uleb128 .Ltmp18-.Lfunc_begin0 # jumps to .Ltmp18
.byte 0 # On action: cleanup
.uleb128 .Ltmp19-.Lfunc_begin0 # >> Call Site 6 <<
.uleb128 .Ltmp20-.Ltmp19 # Call between .Ltmp19 and .Ltmp20
.uleb128 .Ltmp21-.Lfunc_begin0 # jumps to .Ltmp21
.byte 0 # On action: cleanup
.uleb128 .Ltmp20-.Lfunc_begin0 # >> Call Site 7 <<
.uleb128 .Lfunc_end1-.Ltmp20 # Call between .Ltmp20 and .Lfunc_end1
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.Lcst_end0:
.p2align 2, 0x0
# -- End function
.text
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11conv_kernelPKfPfiiiiiiiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $0, 8(%rsp)
movl $1, (%rsp)
movl $cm, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movl $36, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
callq __hipRegisterVar
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
# Compiler-generated teardown, registered via atexit by __hip_module_ctor:
# unregisters the fat binary exactly once and clears the cached handle.
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
# Load the cached module handle; nothing to do if it was never registered.
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
# pushq only realigns the stack to 16 bytes for the call below.
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
# Null the handle so a second invocation becomes a no-op.
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type cm,@object # @cm
.local cm
.comm cm,36,16
.type _Z11conv_kernelPKfPfiiiiiiiii,@object # @_Z11conv_kernelPKfPfiiiiiiiii
.section .rodata,"a",@progbits
.globl _Z11conv_kernelPKfPfiiiiiiiii
.p2align 3, 0x0
_Z11conv_kernelPKfPfiiiiiiiii:
.quad _Z26__device_stub__conv_kernelPKfPfiiiiiiiii
.size _Z11conv_kernelPKfPfiiiiiiiii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/yottabytt/convolution_kernel/master/convolution.hip"
.size .L.str, 109
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "\n"
.size .L.str.1, 2
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz ","
.size .L.str.2, 2
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "GPUassert: %s %s %d\n"
.size .L.str.3, 21
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "vector::_M_realloc_insert"
.size .L.str.4, 26
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z11conv_kernelPKfPfiiiiiiiii"
.size .L__unnamed_1, 30
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "cm"
.size .L__unnamed_2, 3
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z26__device_stub__conv_kernelPKfPfiiiiiiiii
.addrsig_sym __gxx_personality_v0
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Unwind_Resume
.addrsig_sym cm
.addrsig_sym _Z11conv_kernelPKfPfiiiiiiiii
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000691b4_00000000-6_convolution.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
# Compiler-generated (cudafe) atexit hook: unregisters the module's fat
# cubin using the handle cached by __sti____cudaRegisterAllv.
_ZL26__cudaUnregisterBinaryUtilv:
.LFB4057:
.cfi_startproc
endbr64
# subq keeps the stack 16-byte aligned across the call.
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4057:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata._Z9gpuAssert9cudaErrorPKcib.str1.1,"aMS",@progbits,1
.LC0:
.string "GPUassert: %s %s %d\n"
.section .text._Z9gpuAssert9cudaErrorPKcib,"axG",@progbits,_Z9gpuAssert9cudaErrorPKcib,comdat
.weak _Z9gpuAssert9cudaErrorPKcib
.type _Z9gpuAssert9cudaErrorPKcib, @function
_Z9gpuAssert9cudaErrorPKcib:
.LFB4032:
.cfi_startproc
endbr64
testl %edi, %edi
jne .L9
ret
.L9:
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $8, %rsp
.cfi_def_cfa_offset 48
movl %edi, %ebx
movq %rsi, %r13
movl %edx, %r12d
movl %ecx, %ebp
call cudaGetErrorString@PLT
movq %rax, %rcx
movl %r12d, %r9d
movq %r13, %r8
leaq .LC0(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
testb %bpl, %bpl
jne .L10
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L10:
.cfi_restore_state
movl %ebx, %edi
call exit@PLT
.cfi_endproc
.LFE4032:
.size _Z9gpuAssert9cudaErrorPKcib, .-_Z9gpuAssert9cudaErrorPKcib
.text
.globl _Z4convPKfPfiiiiiiiiiS1_iiiiiiii
.type _Z4convPKfPfiiiiiiiiiS1_iiiiiiii, @function
# Host-side placeholder the CUDA front end emits for the __device__-only
# function conv(...): it ignores all arguments and terminates the process
# via exit(1), since the real body exists only in device code.
_Z4convPKfPfiiiiiiiiiS1_iiiiiiii:
.LFB4033:
.cfi_startproc
endbr64
# push/pop pair is dead stack traffic left by the code generator.
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
# Store status 1 to a stack slot, reload it as the exit argument.
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE4033:
.size _Z4convPKfPfiiiiiiiiiS1_iiiiiiii, .-_Z4convPKfPfiiiiiiiiiS1_iiiiiiii
.globl _Z43__device_stub__Z11conv_kernelPKfPfiiiiiiiiiPKfPfiiiiiiiii
.type _Z43__device_stub__Z11conv_kernelPKfPfiiiiiiiiiPKfPfiiiiiiiii, @function
_Z43__device_stub__Z11conv_kernelPKfPfiiiiiiiiiPKfPfiiiiiiiii:
.LFB4079:
.cfi_startproc
endbr64
subq $200, %rsp
.cfi_def_cfa_offset 208
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movl %r9d, (%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movq %rsp, %rax
movq %rax, 136(%rsp)
leaq 208(%rsp), %rax
movq %rax, 144(%rsp)
leaq 216(%rsp), %rax
movq %rax, 152(%rsp)
leaq 224(%rsp), %rax
movq %rax, 160(%rsp)
leaq 232(%rsp), %rax
movq %rax, 168(%rsp)
leaq 240(%rsp), %rax
movq %rax, 176(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L17
.L13:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L18
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 216
pushq 40(%rsp)
.cfi_def_cfa_offset 224
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z11conv_kernelPKfPfiiiiiiiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L13
.L18:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4079:
.size _Z43__device_stub__Z11conv_kernelPKfPfiiiiiiiiiPKfPfiiiiiiiii, .-_Z43__device_stub__Z11conv_kernelPKfPfiiiiiiiiiPKfPfiiiiiiiii
.globl _Z11conv_kernelPKfPfiiiiiiiii
.type _Z11conv_kernelPKfPfiiiiiiiii, @function
_Z11conv_kernelPKfPfiiiiiiiii:
.LFB4080:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 56(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
movl 56(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
movl 56(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 48
movl 56(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 56
movl 56(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 64
call _Z43__device_stub__Z11conv_kernelPKfPfiiiiiiiiiPKfPfiiiiiiiii
addq $56, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4080:
.size _Z11conv_kernelPKfPfiiiiiiiii, .-_Z11conv_kernelPKfPfiiiiiiiii
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "_Z11conv_kernelPKfPfiiiiiiiii"
.LC2:
.string "cm"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB4082:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z11conv_kernelPKfPfiiiiiiiii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $36, %r9d
movl $0, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _ZL2cm(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4082:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .text._ZNSt6vectorISt4pairIiiESaIS1_EED2Ev,"axG",@progbits,_ZNSt6vectorISt4pairIiiESaIS1_EED5Ev,comdat
.align 2
.weak _ZNSt6vectorISt4pairIiiESaIS1_EED2Ev
.type _ZNSt6vectorISt4pairIiiESaIS1_EED2Ev, @function
_ZNSt6vectorISt4pairIiiESaIS1_EED2Ev:
.LFB4393:
.cfi_startproc
endbr64
movq (%rdi), %rax
testq %rax, %rax
je .L26
subq $8, %rsp
.cfi_def_cfa_offset 16
movq 16(%rdi), %rsi
subq %rax, %rsi
movq %rax, %rdi
call _ZdlPvm@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.L26:
ret
.cfi_endproc
.LFE4393:
.size _ZNSt6vectorISt4pairIiiESaIS1_EED2Ev, .-_ZNSt6vectorISt4pairIiiESaIS1_EED2Ev
.weak _ZNSt6vectorISt4pairIiiESaIS1_EED1Ev
.set _ZNSt6vectorISt4pairIiiESaIS1_EED1Ev,_ZNSt6vectorISt4pairIiiESaIS1_EED2Ev
.section .rodata._ZNSt6vectorISt4pairIiiESaIS1_EE17_M_realloc_insertIJS1_EEEvN9__gnu_cxx17__normal_iteratorIPS1_S3_EEDpOT_.str1.1,"aMS",@progbits,1
.LC3:
.string "vector::_M_realloc_insert"
.section .text._ZNSt6vectorISt4pairIiiESaIS1_EE17_M_realloc_insertIJS1_EEEvN9__gnu_cxx17__normal_iteratorIPS1_S3_EEDpOT_,"axG",@progbits,_ZNSt6vectorISt4pairIiiESaIS1_EE17_M_realloc_insertIJS1_EEEvN9__gnu_cxx17__normal_iteratorIPS1_S3_EEDpOT_,comdat
.align 2
.weak _ZNSt6vectorISt4pairIiiESaIS1_EE17_M_realloc_insertIJS1_EEEvN9__gnu_cxx17__normal_iteratorIPS1_S3_EEDpOT_
.type _ZNSt6vectorISt4pairIiiESaIS1_EE17_M_realloc_insertIJS1_EEEvN9__gnu_cxx17__normal_iteratorIPS1_S3_EEDpOT_, @function
_ZNSt6vectorISt4pairIiiESaIS1_EE17_M_realloc_insertIJS1_EEEvN9__gnu_cxx17__normal_iteratorIPS1_S3_EEDpOT_:
.LFB4652:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rdx, 8(%rsp)
movq 8(%rdi), %rbp
movq (%rdi), %r12
movq %rbp, %rax
subq %r12, %rax
sarq $3, %rax
movabsq $1152921504606846975, %rdx
cmpq %rdx, %rax
je .L47
movq %rdi, %r14
movq %rsi, %r13
movq %rsi, %rbx
cmpq %r12, %rbp
movl $1, %edx
cmovne %rax, %rdx
addq %rdx, %rax
jc .L32
movabsq $1152921504606846975, %rdx
cmpq %rdx, %rax
cmovbe %rax, %rdx
movq %rdx, 16(%rsp)
movq %rsi, %rdi
subq %r12, %rdi
movq %rdi, 24(%rsp)
movl $0, %r15d
testq %rax, %rax
je .L33
jmp .L39
.L47:
leaq .LC3(%rip), %rdi
call _ZSt20__throw_length_errorPKc@PLT
.L42:
movq %r15, %rsi
jmp .L34
.L32:
movq %rsi, %rax
subq %r12, %rax
movq %rax, 24(%rsp)
movabsq $1152921504606846975, %rax
movq %rax, 16(%rsp)
.L39:
movq 16(%rsp), %rax
leaq 0(,%rax,8), %rdi
call _Znwm@PLT
movq %rax, %r15
.L33:
movq 8(%rsp), %rax
movq (%rax), %rax
movq 24(%rsp), %rdi
movq %rax, (%r15,%rdi)
cmpq %r12, %r13
je .L42
movq %r13, %rsi
subq %r12, %rsi
addq %r15, %rsi
movq %r15, %rax
movq %r12, %rdx
.L35:
movq (%rdx), %rcx
movq %rcx, (%rax)
addq $8, %rdx
addq $8, %rax
cmpq %rax, %rsi
jne .L35
.L34:
leaq 8(%rsi), %rax
movq %rax, 8(%rsp)
cmpq %rbp, %r13
je .L36
movq %rbp, %rcx
subq %r13, %rcx
.L37:
movq (%rbx), %rdx
movq %rdx, (%rax)
addq $8, %rbx
addq $8, %rax
cmpq %rbp, %rbx
jne .L37
addq %rcx, 8(%rsp)
.L36:
testq %r12, %r12
je .L38
movq 16(%r14), %rsi
subq %r12, %rsi
movq %r12, %rdi
call _ZdlPvm@PLT
.L38:
movq %r15, (%r14)
movq 8(%rsp), %rax
movq %rax, 8(%r14)
movq 16(%rsp), %rax
leaq (%r15,%rax,8), %rax
movq %rax, 16(%r14)
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4652:
.size _ZNSt6vectorISt4pairIiiESaIS1_EE17_M_realloc_insertIJS1_EEEvN9__gnu_cxx17__normal_iteratorIPS1_S3_EEDpOT_, .-_ZNSt6vectorISt4pairIiiESaIS1_EE17_M_realloc_insertIJS1_EEEvN9__gnu_cxx17__normal_iteratorIPS1_S3_EEDpOT_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC5:
.string "/home/ubuntu/Datasets/stackv2/train-structured/yottabytt/convolution_kernel/master/convolution.cu"
.section .rodata.str1.1
.LC9:
.string "\n"
.LC10:
.string ","
.text
.globl main
.type main, @function
main:
.LFB4034:
.cfi_startproc
.cfi_personality 0x9b,DW.ref.__gxx_personality_v0
.cfi_lsda 0x1b,.LLSDA4034
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $88, %rsp
.cfi_def_cfa_offset 144
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $16777216, %edi
.LEHB0:
call _Znam@PLT
movq %rax, %rbp
leaq 16777216(%rax), %rdx
movss .LC4(%rip), %xmm0
.L49:
movss %xmm0, (%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L49
leaq 16(%rsp), %rdi
movl $16777216, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $1, %ecx
movl $233, %edx
leaq .LC5(%rip), %rbx
movq %rbx, %rsi
call _Z9gpuAssert9cudaErrorPKcib
movl $1, %ecx
movl $16777216, %edx
movq %rbp, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $1, %ecx
movl $234, %edx
movq %rbx, %rsi
call _Z9gpuAssert9cudaErrorPKcib
movl $36, %edi
call _Znam@PLT
movq %rax, %rsi
leaq 36(%rax), %rdx
movss .LC6(%rip), %xmm0
.L50:
movss %xmm0, (%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L50
movl $1, %r8d
movl $0, %ecx
movl $36, %edx
leaq _ZL2cm(%rip), %rdi
call cudaMemcpyToSymbol@PLT
movl %eax, %edi
movl $1, %ecx
movl $242, %edx
leaq .LC5(%rip), %rsi
call _Z9gpuAssert9cudaErrorPKcib
movl $16744464, %edi
call _Znam@PLT
movq %rax, %rbx
movq %rax, %r13
leaq 16744464(%rax), %rdx
.L51:
movl $0x00000000, (%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L51
leaq 24(%rsp), %rdi
movl $16744464, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $1, %ecx
movl $253, %edx
leaq .LC5(%rip), %r12
movq %r12, %rsi
call _Z9gpuAssert9cudaErrorPKcib
movl $1, %ecx
movl $16744464, %edx
movq %rbx, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $1, %ecx
movl $254, %edx
movq %r12, %rsi
call _Z9gpuAssert9cudaErrorPKcib
movl $32, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $16384, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 48(%rsp), %rdx
movl $1, %ecx
movq 36(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L74
.L52:
movl $2, %ecx
movl $16744464, %edx
movq 24(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $1, %ecx
movl $266, %edx
leaq .LC5(%rip), %rsi
call _Z9gpuAssert9cudaErrorPKcib
movq $0, 48(%rsp)
movq $0, 56(%rsp)
movq $0, 64(%rsp)
movl $0, %r14d
leaq 36(%rsp), %r15
jmp .L53
.L74:
subq $8, %rsp
.cfi_def_cfa_offset 152
pushq $3
.cfi_def_cfa_offset 160
pushq $34
.cfi_def_cfa_offset 168
pushq $10
.cfi_def_cfa_offset 176
pushq $2046
.cfi_def_cfa_offset 184
pushq $2046
.cfi_def_cfa_offset 192
movl $2048, %r9d
movl $2048, %r8d
movl $32, %ecx
movl $8, %edx
movq 72(%rsp), %rsi
movq 64(%rsp), %rdi
call _Z43__device_stub__Z11conv_kernelPKfPfiiiiiiiiiPKfPfiiiiiiiii
.LEHE0:
addq $48, %rsp
.cfi_def_cfa_offset 144
jmp .L52
.L56:
leaq 48(%rsp), %rdi
movq %r15, %rdx
.LEHB1:
call _ZNSt6vectorISt4pairIiiESaIS1_EE17_M_realloc_insertIJS1_EEEvN9__gnu_cxx17__normal_iteratorIPS1_S3_EEDpOT_
.L54:
addq $1, %r12
cmpq $2046, %r12
je .L75
.L58:
movss 0(%r13,%r12,4), %xmm0
ucomiss .LC8(%rip), %xmm0
jp .L66
je .L54
.L66:
movl %r14d, 36(%rsp)
movl %r12d, 40(%rsp)
movq 56(%rsp), %rsi
cmpq 64(%rsp), %rsi
je .L56
movq 36(%rsp), %rax
movq %rax, (%rsi)
addq $8, %rsi
movq %rsi, 56(%rsp)
jmp .L54
.L75:
addl $1, %r14d
addq $8184, %r13
cmpl $2046, %r14d
je .L59
.L53:
movl $0, %r12d
jmp .L58
.L59:
movq 56(%rsp), %r12
movq 48(%rsp), %r14
movq %r12, %r13
subq %r14, %r13
sarq $3, %r13
movq %r13, %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
leaq .LC9(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
cmpq %r14, %r12
je .L60
movl $0, %r12d
leaq _ZSt4cout(%rip), %r15
jmp .L61
.L76:
movl $1, %edx
leaq .LC10(%rip), %rsi
movq %rax, 8(%rsp)
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl 4(%r14,%r12,8), %esi
movq 8(%rsp), %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
movl $1, %edx
leaq .LC9(%rip), %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $1, %r12
cmpq %r13, %r12
jnb .L60
.L61:
movl (%r14,%r12,8), %esi
movq %r15, %rdi
call _ZNSolsEi@PLT
jmp .L76
.L60:
call cudaDeviceReset@PLT
.LEHE1:
movl $4, %esi
movq %rbp, %rdi
call _ZdlPvm@PLT
movl $4, %esi
movq %rbx, %rdi
call _ZdlPvm@PLT
leaq 48(%rsp), %rdi
call _ZNSt6vectorISt4pairIiiESaIS1_EED1Ev
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L77
movl $0, %eax
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L65:
.cfi_restore_state
endbr64
movq %rax, %rbx
leaq 48(%rsp), %rdi
call _ZNSt6vectorISt4pairIiiESaIS1_EED1Ev
movq 72(%rsp), %rax
subq %fs:40, %rax
je .L63
call __stack_chk_fail@PLT
.L63:
movq %rbx, %rdi
.LEHB2:
call _Unwind_Resume@PLT
.LEHE2:
.L77:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4034:
.globl __gxx_personality_v0
.section .gcc_except_table,"a",@progbits
.LLSDA4034:
.byte 0xff
.byte 0xff
.byte 0x1
.uleb128 .LLSDACSE4034-.LLSDACSB4034
.LLSDACSB4034:
.uleb128 .LEHB0-.LFB4034
.uleb128 .LEHE0-.LEHB0
.uleb128 0
.uleb128 0
.uleb128 .LEHB1-.LFB4034
.uleb128 .LEHE1-.LEHB1
.uleb128 .L65-.LFB4034
.uleb128 0
.uleb128 .LEHB2-.LFB4034
.uleb128 .LEHE2-.LEHB2
.uleb128 0
.uleb128 0
.LLSDACSE4034:
.text
.size main, .-main
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL2cm
.comm _ZL2cm,36,32
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC4:
.long 1065353216
.align 4
.LC6:
.long 1056964608
.align 4
.LC8:
.long 1083179008
.hidden DW.ref.__gxx_personality_v0
.weak DW.ref.__gxx_personality_v0
.section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat
.align 8
.type DW.ref.__gxx_personality_v0, @object
.size DW.ref.__gxx_personality_v0, 8
DW.ref.__gxx_personality_v0:
.quad __gxx_personality_v0
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "convolution.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z26__device_stub__conv_kernelPKfPfiiiiiiiii # -- Begin function _Z26__device_stub__conv_kernelPKfPfiiiiiiiii
.p2align 4, 0x90
.type _Z26__device_stub__conv_kernelPKfPfiiiiiiiii,@function
_Z26__device_stub__conv_kernelPKfPfiiiiiiiii: # @_Z26__device_stub__conv_kernelPKfPfiiiiiiiii
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movl %r9d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
leaq 176(%rsp), %rax
movq %rax, 128(%rsp)
leaq 184(%rsp), %rax
movq %rax, 136(%rsp)
leaq 192(%rsp), %rax
movq %rax, 144(%rsp)
leaq 200(%rsp), %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z11conv_kernelPKfPfiiiiiiiii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end0:
.size _Z26__device_stub__conv_kernelPKfPfiiiiiiiii, .Lfunc_end0-_Z26__device_stub__conv_kernelPKfPfiiiiiiiii
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI1_0:
.long 0x40900000 # float 4.5
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.Lfunc_begin0:
.cfi_startproc
.cfi_personality 3, __gxx_personality_v0
.cfi_lsda 3, .Lexception0
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $248, %rsp
.cfi_def_cfa_offset 304
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
.cfi_escape 0x2e, 0x00
movl $16777216, %edi # imm = 0x1000000
callq _Znam
movq %rax, %r15
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl $1065353216, (%r15,%rax,4) # imm = 0x3F800000
incq %rax
cmpq $4194304, %rax # imm = 0x400000
jne .LBB1_1
# %bb.2:
.cfi_escape 0x2e, 0x00
leaq 64(%rsp), %rdi
movl $16777216, %esi # imm = 0x1000000
callq hipMalloc
testl %eax, %eax
jne .LBB1_3
# %bb.5: # %_Z9gpuAssert10hipError_tPKcib.exit
movq 64(%rsp), %rdi
.cfi_escape 0x2e, 0x00
movl $16777216, %edx # imm = 0x1000000
movq %r15, %rsi
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_6
# %bb.9: # %_Z9gpuAssert10hipError_tPKcib.exit85
.cfi_escape 0x2e, 0x00
movl $36, %edi
callq _Znam
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB1_10: # =>This Inner Loop Header: Depth=1
movl $1056964608, (%rax,%rcx,4) # imm = 0x3F000000
incq %rcx
cmpq $9, %rcx
jne .LBB1_10
# %bb.7:
.cfi_escape 0x2e, 0x00
movl $cm, %edi
movl $36, %edx
movq %rax, %rsi
xorl %ecx, %ecx
movl $1, %r8d
callq hipMemcpyToSymbol
testl %eax, %eax
jne .LBB1_8
# %bb.11: # %_Z9gpuAssert10hipError_tPKcib.exit87
.cfi_escape 0x2e, 0x00
movl $16744464, %edi # imm = 0xFF8010
callq _Znam
movq %rax, %rbx
.cfi_escape 0x2e, 0x00
movl $16744464, %edx # imm = 0xFF8010
movq %rax, %rdi
xorl %esi, %esi
callq memset@PLT
.cfi_escape 0x2e, 0x00
leaq 8(%rsp), %rdi
movl $16744464, %esi # imm = 0xFF8010
callq hipMalloc
testl %eax, %eax
jne .LBB1_12
# %bb.13: # %_Z9gpuAssert10hipError_tPKcib.exit89
movq 8(%rsp), %rdi
.cfi_escape 0x2e, 0x00
movl $16744464, %edx # imm = 0xFF8010
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_14
# %bb.15: # %_Z9gpuAssert10hipError_tPKcib.exit91
movabsq $4294967328, %rdx # imm = 0x100000020
leaq 16352(%rdx), %rdi
.cfi_escape 0x2e, 0x00
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_17
# %bb.16:
movq 64(%rsp), %rax
movq 8(%rsp), %rcx
movq %rax, 152(%rsp)
movq %rcx, 144(%rsp)
movl $8, 52(%rsp)
movl $32, 48(%rsp)
movl $2048, 44(%rsp) # imm = 0x800
movl $2048, 40(%rsp) # imm = 0x800
movl $2046, 36(%rsp) # imm = 0x7FE
movl $2046, 32(%rsp) # imm = 0x7FE
movl $10, 28(%rsp)
movl $34, 24(%rsp)
movl $3, 20(%rsp)
leaq 152(%rsp), %rax
movq %rax, 160(%rsp)
leaq 144(%rsp), %rax
movq %rax, 168(%rsp)
leaq 52(%rsp), %rax
movq %rax, 176(%rsp)
leaq 48(%rsp), %rax
movq %rax, 184(%rsp)
leaq 44(%rsp), %rax
movq %rax, 192(%rsp)
leaq 40(%rsp), %rax
movq %rax, 200(%rsp)
leaq 36(%rsp), %rax
movq %rax, 208(%rsp)
leaq 32(%rsp), %rax
movq %rax, 216(%rsp)
leaq 28(%rsp), %rax
movq %rax, 224(%rsp)
leaq 24(%rsp), %rax
movq %rax, 232(%rsp)
leaq 20(%rsp), %rax
movq %rax, 240(%rsp)
.cfi_escape 0x2e, 0x00
leaq 128(%rsp), %rdi
leaq 112(%rsp), %rsi
leaq 104(%rsp), %rdx
leaq 96(%rsp), %rcx
callq __hipPopCallConfiguration
movq 128(%rsp), %rsi
movl 136(%rsp), %edx
movq 112(%rsp), %rcx
movl 120(%rsp), %r8d
.cfi_escape 0x2e, 0x10
leaq 160(%rsp), %r9
movl $_Z11conv_kernelPKfPfiiiiiiiii, %edi
pushq 96(%rsp)
.cfi_adjust_cfa_offset 8
pushq 112(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_17:
movq 8(%rsp), %rsi
.cfi_escape 0x2e, 0x00
movl $16744464, %edx # imm = 0xFF8010
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_59
# %bb.18: # %.preheader.preheader
xorl %ecx, %ecx
movss .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
xorl %eax, %eax
movq %rax, (%rsp) # 8-byte Spill
xorl %r12d, %r12d
xorl %eax, %eax
movq %rbx, 56(%rsp) # 8-byte Spill
movq %r15, 72(%rsp) # 8-byte Spill
jmp .LBB1_19
.p2align 4, 0x90
.LBB1_24: # %_Z9gpuAssert10hipError_tPKcib.exit93
# in Loop: Header=BB1_19 Depth=1
incq %rcx
cmpq $2046, %rcx # imm = 0x7FE
je .LBB1_25
.LBB1_19: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_20 Depth 2
# Child Loop BB1_47 Depth 3
imulq $8184, %rcx, %rdx # imm = 0x1FF8
addq %rbx, %rdx
xorl %r14d, %r14d
movq %rcx, 88(%rsp) # 8-byte Spill
movq %rdx, 80(%rsp) # 8-byte Spill
jmp .LBB1_20
.p2align 4, 0x90
.LBB1_22: # in Loop: Header=BB1_20 Depth=2
movq %rbp, (%r12)
addq $8, %r12
.LBB1_23: # %_ZNSt6vectorISt4pairIiiESaIS1_EE9push_backEOS1_.exit
# in Loop: Header=BB1_20 Depth=2
incq %r14
cmpq $2046, %r14 # imm = 0x7FE
je .LBB1_24
.LBB1_20: # Parent Loop BB1_19 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB1_47 Depth 3
movss (%rdx,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
ucomiss %xmm1, %xmm0
jne .LBB1_21
jnp .LBB1_23
.LBB1_21: # in Loop: Header=BB1_20 Depth=2
movq %r14, %rbp
shlq $32, %rbp
orq %rcx, %rbp
cmpq %rax, %r12
jne .LBB1_22
# %bb.38: # in Loop: Header=BB1_20 Depth=2
movq %r12, %r13
subq (%rsp), %r13 # 8-byte Folded Reload
movabsq $9223372036854775800, %rax # imm = 0x7FFFFFFFFFFFFFF8
cmpq %rax, %r13
je .LBB1_39
# %bb.41: # %_ZNKSt6vectorISt4pairIiiESaIS1_EE12_M_check_lenEmPKc.exit.i
# in Loop: Header=BB1_20 Depth=2
sarq $3, %r13
cmpq $1, %r13
movq %r13, %rax
adcq $0, %rax
leaq (%rax,%r13), %r15
movabsq $1152921504606846975, %rcx # imm = 0xFFFFFFFFFFFFFFF
cmpq %rcx, %r15
cmovaeq %rcx, %r15
addq %r13, %rax
cmovbq %rcx, %r15
testq %r15, %r15
je .LBB1_42
# %bb.43: # in Loop: Header=BB1_20 Depth=2
leaq (,%r15,8), %rdi
.Ltmp0:
.cfi_escape 0x2e, 0x00
callq _Znwm
.Ltmp1:
# %bb.44: # in Loop: Header=BB1_20 Depth=2
movq %rax, %rbx
movss .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
jmp .LBB1_45
.LBB1_42: # in Loop: Header=BB1_20 Depth=2
xorl %ebx, %ebx
.LBB1_45: # %_ZNSt12_Vector_baseISt4pairIiiESaIS1_EE11_M_allocateEm.exit.i
# in Loop: Header=BB1_20 Depth=2
movq %rbp, (%rbx,%r13,8)
movq %rbx, %r13
cmpq %r12, (%rsp) # 8-byte Folded Reload
je .LBB1_48
# %bb.46: # %.lr.ph.i.i.i.i.preheader
# in Loop: Header=BB1_20 Depth=2
movq %rbx, %r13
movq (%rsp), %rax # 8-byte Reload
.p2align 4, 0x90
.LBB1_47: # %.lr.ph.i.i.i.i
# Parent Loop BB1_19 Depth=1
# Parent Loop BB1_20 Depth=2
# => This Inner Loop Header: Depth=3
movq (%rax), %rcx
movq %rcx, (%r13)
addq $8, %rax
addq $8, %r13
cmpq %r12, %rax
jne .LBB1_47
.LBB1_48: # %_ZNSt6vectorISt4pairIiiESaIS1_EE11_S_relocateEPS1_S4_S4_RS2_.exit.i
# in Loop: Header=BB1_20 Depth=2
movq (%rsp), %rdi # 8-byte Reload
testq %rdi, %rdi
je .LBB1_50
# %bb.49: # in Loop: Header=BB1_20 Depth=2
.cfi_escape 0x2e, 0x00
callq _ZdlPv
movss .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
.LBB1_50: # %_ZNSt6vectorISt4pairIiiESaIS1_EE17_M_realloc_insertIJS1_EEEvN9__gnu_cxx17__normal_iteratorIPS1_S3_EEDpOT_.exit
# in Loop: Header=BB1_20 Depth=2
addq $8, %r13
leaq (%rbx,%r15,8), %rax
movq %r13, %r12
movq %rbx, (%rsp) # 8-byte Spill
movq 72(%rsp), %r15 # 8-byte Reload
movq 56(%rsp), %rbx # 8-byte Reload
movq 88(%rsp), %rcx # 8-byte Reload
movq 80(%rsp), %rdx # 8-byte Reload
jmp .LBB1_23
.LBB1_25:
movq %r12, %r14
subq (%rsp), %r14 # 8-byte Folded Reload
sarq $3, %r14
.Ltmp3:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
movq %r14, %rsi
callq _ZNSo9_M_insertImEERSoT_
.Ltmp4:
# %bb.26: # %_ZNSolsEm.exit
.Ltmp5:
.cfi_escape 0x2e, 0x00
movl $.L.str.1, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp6:
# %bb.27: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit.preheader
movq (%rsp), %rax # 8-byte Reload
cmpq %rax, %r12
je .LBB1_34
# %bb.28: # %.lr.ph.preheader
movq %rax, %r12
cmpq $1, %r14
adcq $0, %r14
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_29: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl (%r12,%rbx,8), %esi
.Ltmp7:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
callq _ZNSolsEi
.Ltmp8:
# %bb.30: # in Loop: Header=BB1_29 Depth=1
.Ltmp9:
movq %rax, %r13
.cfi_escape 0x2e, 0x00
movl $.L.str.2, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp10:
# %bb.31: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit96
# in Loop: Header=BB1_29 Depth=1
movl 4(%r12,%rbx,8), %esi
.Ltmp11:
.cfi_escape 0x2e, 0x00
movq %r13, %rdi
callq _ZNSolsEi
.Ltmp12:
# %bb.32: # in Loop: Header=BB1_29 Depth=1
.Ltmp13:
.cfi_escape 0x2e, 0x00
movl $.L.str.1, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp14:
# %bb.33: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit98
# in Loop: Header=BB1_29 Depth=1
incq %rbx
cmpq %rbx, %r14
jne .LBB1_29
.LBB1_34: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit._crit_edge
.Ltmp16:
.cfi_escape 0x2e, 0x00
callq hipDeviceReset
.Ltmp17:
# %bb.35:
.cfi_escape 0x2e, 0x00
movq %r15, %rdi
callq _ZdlPv
.cfi_escape 0x2e, 0x00
movq 56(%rsp), %rdi # 8-byte Reload
callq _ZdlPv
movq (%rsp), %rdi # 8-byte Reload
testq %rdi, %rdi
je .LBB1_37
# %bb.36:
.cfi_escape 0x2e, 0x00
callq _ZdlPv
.LBB1_37: # %_ZNSt6vectorISt4pairIiiESaIS1_EED2Ev.exit
xorl %eax, %eax
addq $248, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_39:
.cfi_def_cfa_offset 304
.Ltmp19:
.cfi_escape 0x2e, 0x00
movl $.L.str.4, %edi
callq _ZSt20__throw_length_errorPKc
.Ltmp20:
# %bb.40: # %.noexc102
.LBB1_3:
movq stderr(%rip), %rbx
.cfi_escape 0x2e, 0x00
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
.cfi_escape 0x2e, 0x00
movl $.L.str.3, %esi
movl $.L.str, %ecx
movq %rbx, %rdi
movq %rax, %rdx
movl $233, %r8d
jmp .LBB1_4
.LBB1_6:
movq stderr(%rip), %rbx
.cfi_escape 0x2e, 0x00
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
.cfi_escape 0x2e, 0x00
movl $.L.str.3, %esi
movl $.L.str, %ecx
movq %rbx, %rdi
movq %rax, %rdx
movl $234, %r8d
jmp .LBB1_4
.LBB1_8:
movq stderr(%rip), %rbx
.cfi_escape 0x2e, 0x00
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
.cfi_escape 0x2e, 0x00
movl $.L.str.3, %esi
movl $.L.str, %ecx
movq %rbx, %rdi
movq %rax, %rdx
movl $242, %r8d
jmp .LBB1_4
.LBB1_12:
movq stderr(%rip), %rbx
.cfi_escape 0x2e, 0x00
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
.cfi_escape 0x2e, 0x00
movl $.L.str.3, %esi
movl $.L.str, %ecx
movq %rbx, %rdi
movq %rax, %rdx
movl $253, %r8d
jmp .LBB1_4
.LBB1_14:
movq stderr(%rip), %rbx
.cfi_escape 0x2e, 0x00
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
.cfi_escape 0x2e, 0x00
movl $.L.str.3, %esi
movl $.L.str, %ecx
movq %rbx, %rdi
movq %rax, %rdx
movl $255, %r8d
jmp .LBB1_4
.LBB1_59:
movq stderr(%rip), %rbx
.cfi_escape 0x2e, 0x00
movl %eax, %edi
movl %eax, %ebp
callq hipGetErrorString
.cfi_escape 0x2e, 0x00
movl $.L.str.3, %esi
movl $.L.str, %ecx
movq %rbx, %rdi
movq %rax, %rdx
movl $267, %r8d # imm = 0x10B
.LBB1_4:
xorl %eax, %eax
callq fprintf
.cfi_escape 0x2e, 0x00
movl %ebp, %edi
callq exit
.LBB1_54:
.Ltmp18:
jmp .LBB1_52
.LBB1_55:
.Ltmp15:
movq %rax, %rbx
jmp .LBB1_56
.LBB1_51: # %.loopexit
.Ltmp2:
jmp .LBB1_52
.LBB1_53: # %.loopexit.split-lp
.Ltmp21:
.LBB1_52:
movq %rax, %rbx
movq (%rsp), %r12 # 8-byte Reload
.LBB1_56:
testq %r12, %r12
je .LBB1_58
# %bb.57:
.cfi_escape 0x2e, 0x00
movq %r12, %rdi
callq _ZdlPv
.LBB1_58: # %_ZNSt6vectorISt4pairIiiESaIS1_EED2Ev.exit100
.cfi_escape 0x2e, 0x00
movq %rbx, %rdi
callq _Unwind_Resume@PLT
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
.section .gcc_except_table,"a",@progbits
.p2align 2, 0x0
GCC_except_table1:
.Lexception0:
.byte 255 # @LPStart Encoding = omit
.byte 255 # @TType Encoding = omit
.byte 1 # Call site Encoding = uleb128
.uleb128 .Lcst_end0-.Lcst_begin0
.Lcst_begin0:
.uleb128 .Lfunc_begin0-.Lfunc_begin0 # >> Call Site 1 <<
.uleb128 .Ltmp0-.Lfunc_begin0 # Call between .Lfunc_begin0 and .Ltmp0
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp0-.Lfunc_begin0 # >> Call Site 2 <<
.uleb128 .Ltmp1-.Ltmp0 # Call between .Ltmp0 and .Ltmp1
.uleb128 .Ltmp2-.Lfunc_begin0 # jumps to .Ltmp2
.byte 0 # On action: cleanup
.uleb128 .Ltmp3-.Lfunc_begin0 # >> Call Site 3 <<
.uleb128 .Ltmp6-.Ltmp3 # Call between .Ltmp3 and .Ltmp6
.uleb128 .Ltmp18-.Lfunc_begin0 # jumps to .Ltmp18
.byte 0 # On action: cleanup
.uleb128 .Ltmp7-.Lfunc_begin0 # >> Call Site 4 <<
.uleb128 .Ltmp14-.Ltmp7 # Call between .Ltmp7 and .Ltmp14
.uleb128 .Ltmp15-.Lfunc_begin0 # jumps to .Ltmp15
.byte 0 # On action: cleanup
.uleb128 .Ltmp16-.Lfunc_begin0 # >> Call Site 5 <<
.uleb128 .Ltmp17-.Ltmp16 # Call between .Ltmp16 and .Ltmp17
.uleb128 .Ltmp18-.Lfunc_begin0 # jumps to .Ltmp18
.byte 0 # On action: cleanup
.uleb128 .Ltmp19-.Lfunc_begin0 # >> Call Site 6 <<
.uleb128 .Ltmp20-.Ltmp19 # Call between .Ltmp19 and .Ltmp20
.uleb128 .Ltmp21-.Lfunc_begin0 # jumps to .Ltmp21
.byte 0 # On action: cleanup
.uleb128 .Ltmp20-.Lfunc_begin0 # >> Call Site 7 <<
.uleb128 .Lfunc_end1-.Ltmp20 # Call between .Ltmp20 and .Lfunc_end1
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.Lcst_end0:
.p2align 2, 0x0
# -- End function
.text
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11conv_kernelPKfPfiiiiiiiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $0, 8(%rsp)
movl $1, (%rsp)
movl $cm, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movl $36, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
callq __hipRegisterVar
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type cm,@object # @cm
.local cm
.comm cm,36,16
.type _Z11conv_kernelPKfPfiiiiiiiii,@object # @_Z11conv_kernelPKfPfiiiiiiiii
.section .rodata,"a",@progbits
.globl _Z11conv_kernelPKfPfiiiiiiiii
.p2align 3, 0x0
_Z11conv_kernelPKfPfiiiiiiiii:
.quad _Z26__device_stub__conv_kernelPKfPfiiiiiiiii
.size _Z11conv_kernelPKfPfiiiiiiiii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/yottabytt/convolution_kernel/master/convolution.hip"
.size .L.str, 109
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "\n"
.size .L.str.1, 2
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz ","
.size .L.str.2, 2
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "GPUassert: %s %s %d\n"
.size .L.str.3, 21
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "vector::_M_realloc_insert"
.size .L.str.4, 26
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z11conv_kernelPKfPfiiiiiiiii"
.size .L__unnamed_1, 30
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "cm"
.size .L__unnamed_2, 3
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z26__device_stub__conv_kernelPKfPfiiiiiiiii
.addrsig_sym __gxx_personality_v0
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Unwind_Resume
.addrsig_sym cm
.addrsig_sym _Z11conv_kernelPKfPfiiiiiiiii
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
// Necessary for random numbers in CUDA
#include <curand_kernel.h>
#include <curand.h>
#define NUM_ITER 1000000000
#define TPB 128 // Threads PER block
#define NUM_THREADS 10000 // Total number of threads to execute
/**
* Function which, for each instance of the kernel, generates a random point
* and calculates whether or not it is within a circle.
*
* @param counts Array for each thread to store the total number of
* randomly generate points that were within a circle
* @param numIter Number of iterations / points each thread should make
* @param numThreads Number of threads that should be doing work
* @param curandState Array for each thread to store its own curandState
* structure
*/
__global__ void estimatePiKernel(unsigned int *counts, unsigned int numIter,
unsigned int numThreads,
curandState *randState) {
double x, y, distance;
// Unique ID of the current thread to determine what work to compute
int threadId = blockIdx.x * blockDim.x + threadIdx.x;
// This thread has no work to do, exit
if (threadId > numThreads) return;
// Used threadId as a seed of randomness so that every thread is generating
// different random values
int seed = threadId;
curand_init(threadId, seed, 0, &randState[threadId]);
for (int iter = 0; iter < numIter; iter++) {
// Generate random x, y coordinates from 0.0 (exclusive) to 1.0
// (inclusive) for a point
x = (double) curand_uniform(&randState[threadId]);
y = (double) curand_uniform(&randState[threadId]);
// Distance from the origin of the circle
distance = sqrt((x * x) + (y * y));
// If the distance from the origin of the circle is less than or equal
// to 1, that means that the randomly generated point is inside the
// circle because the circle has a radius of 1. Increment number of
// points randomly generated within the circle
if (distance <= 1.0) counts[threadId]++;
}
}
/**
* Tally up the counts in an array indicating the number of randomly generated
* points that were inside a circle and estimate pi.
*
* @param counts Array of counts of points generate inside a circle
*/
/**
 * Tally up the per-thread counts of randomly generated points that landed
 * inside the circle and print the resulting estimate of pi.
 *
 * @param counts Host array of NUM_THREADS per-thread in-circle counts
 */
void estimatePi(unsigned int *counts) {
    unsigned int totalCount = 0;
    // Accumulate the counts of points in the circle into totalCount
    for (int index = 0; index < NUM_THREADS; index++) {
        totalCount += counts[index];
    }
    // %u matches the unsigned operand (%d was undefined for values > INT_MAX)
    printf("total count: %u\n", totalCount);
    // Calculate pi according to the formula P(point in circle) * 4 where
    // P(point in circle) is (points in circle) / (total points)
    double piEstimation = ((double) totalCount / (double) NUM_ITER) * 4.0;
    printf("The result is %f\n", piEstimation);
}
/**
* Return a timestamp with double percision.
*/
/**
 * Wall-clock timestamp in seconds, with microsecond resolution folded into
 * the fractional part.
 */
double cpuSecond() {
    struct timeval now;
    gettimeofday(&now, NULL);
    double seconds = (double) now.tv_sec;
    double micros = (double) now.tv_usec;
    return seconds + micros * 1.e-6;
}
int main() {
    // Allocate space for curandState for each thread
    curandState *randState;
    cudaMalloc(&randState, NUM_THREADS * sizeof(curandState));
    // Allocate space to keep track of counts of points generated in the circle
    unsigned int *deviceCounts;
    cudaMalloc(&deviceCounts, NUM_THREADS * sizeof(unsigned int));
    // Allocate space to copy the GPU result back to the CPU
    unsigned int *hostCounts = (unsigned int*) malloc(
        NUM_THREADS * sizeof(unsigned int));
    // Set all of the memory to 0
    cudaMemset(deviceCounts, 0, NUM_THREADS * sizeof(unsigned int));
    double startTime = cpuSecond();
    // Launch the kernel: enough TPB-sized blocks to cover NUM_THREADS threads
    estimatePiKernel <<<(NUM_THREADS + TPB - 1) / TPB, TPB>>> (
        deviceCounts, NUM_ITER / NUM_THREADS, NUM_THREADS, randState);
    // Surface launch-configuration errors immediately; the launch itself
    // returns no status
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess) {
        printf("Kernel launch failed: %s\n", cudaGetErrorString(launchErr));
    }
    // Wait for the kernel to finish
    cudaDeviceSynchronize();
    printf("Total time: %f\n", cpuSecond() - startTime);
    // Copy GPU counts to the CPU
    cudaMemcpy(
        hostCounts, deviceCounts,
        NUM_THREADS * sizeof(unsigned int), cudaMemcpyDeviceToHost
    );
    // Print pi estimation
    estimatePi(hostCounts);
    // Release device and host allocations (previously leaked)
    cudaFree(deviceCounts);
    cudaFree(randState);
    free(hostCounts);
    return 0;
}
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2276:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2276:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "total count: %d\n"
.LC3:
.string "The result is %f\n"
.text
.globl _Z10estimatePiPj
.type _Z10estimatePiPj, @function
_Z10estimatePiPj:
.LFB2271:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq 40000(%rdi), %rdx
movl $0, %eax
.L4:
addl (%rdi), %eax
movl %eax, %ebx
addq $4, %rdi
cmpq %rdx, %rdi
jne .L4
movl %eax, %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebx, %ebx
pxor %xmm0, %xmm0
cvtsi2sdq %rbx, %xmm0
divsd .LC1(%rip), %xmm0
mulsd .LC2(%rip), %xmm0
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2271:
.size _Z10estimatePiPj, .-_Z10estimatePiPj
.globl _Z9cpuSecondv
.type _Z9cpuSecondv, @function
_Z9cpuSecondv:
.LFB2272:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $0, %esi
call gettimeofday@PLT
pxor %xmm0, %xmm0
cvtsi2sdq 8(%rsp), %xmm0
mulsd .LC4(%rip), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq (%rsp), %xmm1
addsd %xmm1, %xmm0
movq 24(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L12:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2272:
.size _Z9cpuSecondv, .-_Z9cpuSecondv
.globl _Z58__device_stub__Z16estimatePiKernelPjjjP17curandStateXORWOWPjjjP17curandStateXORWOW
.type _Z58__device_stub__Z16estimatePiKernelPjjjP17curandStateXORWOWPjjjP17curandStateXORWOW, @function
_Z58__device_stub__Z16estimatePiKernelPjjjP17curandStateXORWOWPjjjP17curandStateXORWOW:
.LFB2298:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movl %esi, 20(%rsp)
movl %edx, 16(%rsp)
movq %rcx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L17
.L13:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L18
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z16estimatePiKernelPjjjP17curandStateXORWOW(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L13
.L18:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2298:
.size _Z58__device_stub__Z16estimatePiKernelPjjjP17curandStateXORWOWPjjjP17curandStateXORWOW, .-_Z58__device_stub__Z16estimatePiKernelPjjjP17curandStateXORWOWPjjjP17curandStateXORWOW
.globl _Z16estimatePiKernelPjjjP17curandStateXORWOW
.type _Z16estimatePiKernelPjjjP17curandStateXORWOW, @function
_Z16estimatePiKernelPjjjP17curandStateXORWOW:
.LFB2299:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z58__device_stub__Z16estimatePiKernelPjjjP17curandStateXORWOWPjjjP17curandStateXORWOW
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2299:
.size _Z16estimatePiKernelPjjjP17curandStateXORWOW, .-_Z16estimatePiKernelPjjjP17curandStateXORWOW
.section .rodata.str1.1
.LC5:
.string "Total time: %f\n"
.text
.globl main
.type main, @function
main:
.LFB2273:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $64, %rsp
.cfi_def_cfa_offset 80
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
leaq 16(%rsp), %rdi
movl $480000, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $40000, %esi
call cudaMalloc@PLT
movl $40000, %edi
call malloc@PLT
movq %rax, %rbx
movl $40000, %edx
movl $0, %esi
movq 24(%rsp), %rdi
call cudaMemset@PLT
call _Z9cpuSecondv
movsd %xmm0, 8(%rsp)
movl $128, 44(%rsp)
movl $1, 48(%rsp)
movl $79, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L25
.L22:
call cudaDeviceSynchronize@PLT
call _Z9cpuSecondv
subsd 8(%rsp), %xmm0
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $2, %ecx
movl $40000, %edx
movq 24(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
movq %rbx, %rdi
call _Z10estimatePiPj
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L26
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
movq 16(%rsp), %rcx
movl $10000, %edx
movl $100000, %esi
movq 24(%rsp), %rdi
call _Z58__device_stub__Z16estimatePiKernelPjjjP17curandStateXORWOWPjjjP17curandStateXORWOW
jmp .L22
.L26:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2273:
.size main, .-main
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC6:
.string "_Z16estimatePiKernelPjjjP17curandStateXORWOW"
.section .rodata.str1.1
.LC7:
.string "precalc_xorwow_matrix"
.LC8:
.string "precalc_xorwow_offset_matrix"
.LC9:
.string "mrg32k3aM1"
.LC10:
.string "mrg32k3aM2"
.LC11:
.string "mrg32k3aM1SubSeq"
.LC12:
.string "mrg32k3aM2SubSeq"
.LC13:
.string "mrg32k3aM1Seq"
.LC14:
.string "mrg32k3aM2Seq"
.LC15:
.string "__cr_lgamma_table"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2301:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z16estimatePiKernelPjjjP17curandStateXORWOW(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $102400, %r9d
movl $0, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _ZL21precalc_xorwow_matrix(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $102400, %r9d
movl $0, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _ZL28precalc_xorwow_offset_matrix(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10mrg32k3aM1(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10mrg32k3aM2(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2016, %r9d
movl $0, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _ZL16mrg32k3aM1SubSeq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2016, %r9d
movl $0, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _ZL16mrg32k3aM2SubSeq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13mrg32k3aM1Seq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC14(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13mrg32k3aM2Seq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $72, %r9d
movl $0, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _ZL17__cr_lgamma_table(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2301:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL17__cr_lgamma_table
.comm _ZL17__cr_lgamma_table,72,32
.local _ZL13mrg32k3aM2Seq
.comm _ZL13mrg32k3aM2Seq,2304,32
.local _ZL13mrg32k3aM1Seq
.comm _ZL13mrg32k3aM1Seq,2304,32
.local _ZL16mrg32k3aM2SubSeq
.comm _ZL16mrg32k3aM2SubSeq,2016,32
.local _ZL16mrg32k3aM1SubSeq
.comm _ZL16mrg32k3aM1SubSeq,2016,32
.local _ZL10mrg32k3aM2
.comm _ZL10mrg32k3aM2,2304,32
.local _ZL10mrg32k3aM1
.comm _ZL10mrg32k3aM1,2304,32
.local _ZL28precalc_xorwow_offset_matrix
.comm _ZL28precalc_xorwow_offset_matrix,102400,32
.local _ZL21precalc_xorwow_matrix
.comm _ZL21precalc_xorwow_matrix,102400,32
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC1:
.long 0
.long 1104006501
.align 8
.LC2:
.long 0
.long 1074790400
.align 8
.LC4:
.long -1598689907
.long 1051772663
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
// Necessary for random numbers in CUDA
#include <curand_kernel.h>
#include <curand.h>
#define NUM_ITER 1000000000
#define TPB 128 // Threads PER block
#define NUM_THREADS 10000 // Total number of threads to execute
/**
* Function which, for each instance of the kernel, generates a random point
* and calculates whether or not it is within a circle.
*
* @param counts Array for each thread to store the total number of
* randomly generate points that were within a circle
* @param numIter Number of iterations / points each thread should make
* @param numThreads Number of threads that should be doing work
* @param curandState Array for each thread to store its own curandState
* structure
*/
__global__ void estimatePiKernel(unsigned int *counts, unsigned int numIter,
                                 unsigned int numThreads,
                                 curandState *randState) {
    double x, y, distance2;
    // Unique ID of the current thread to determine what work to compute
    unsigned int threadId = blockIdx.x * blockDim.x + threadIdx.x;
    // Bounds guard: `counts` and `randState` hold exactly `numThreads`
    // entries, so threadId == numThreads must exit as well. The original
    // used `>`, an off-by-one that let one extra thread write past the end
    // of both arrays.
    if (threadId >= numThreads) return;
    // Use threadId as the seed of randomness so that every thread is
    // generating different random values
    unsigned int seed = threadId;
    curand_init(threadId, seed, 0, &randState[threadId]);
    // `iter` is unsigned to match numIter (avoids signed/unsigned compare)
    for (unsigned int iter = 0; iter < numIter; iter++) {
        // Generate random x, y coordinates from 0.0 (exclusive) to 1.0
        // (inclusive) for a point
        x = (double) curand_uniform(&randState[threadId]);
        y = (double) curand_uniform(&randState[threadId]);
        // Squared distance from the origin. Comparing it against 1.0 is
        // equivalent to comparing sqrt(x*x + y*y) against 1.0 (sqrt is
        // monotonic) and saves a sqrt per iteration.
        distance2 = (x * x) + (y * y);
        // The point lies inside the radius-1 circle: count it for this thread
        if (distance2 <= 1.0) counts[threadId]++;
    }
}
/**
* Tally up the counts in an array indicating the number of randomly generated
* points that were inside a circle and estimate pi.
*
* @param counts Array of counts of points generate inside a circle
*/
/**
 * Tally up the per-thread counts of randomly generated points that landed
 * inside the circle and print the resulting estimate of pi.
 *
 * @param counts Host array of NUM_THREADS per-thread in-circle counts
 */
void estimatePi(unsigned int *counts) {
    unsigned int totalCount = 0;
    // Accumulate the counts of points in the circle into totalCount
    for (int index = 0; index < NUM_THREADS; index++) {
        totalCount += counts[index];
    }
    // %u matches the unsigned operand (%d was undefined for values > INT_MAX)
    printf("total count: %u\n", totalCount);
    // Calculate pi according to the formula P(point in circle) * 4 where
    // P(point in circle) is (points in circle) / (total points)
    double piEstimation = ((double) totalCount / (double) NUM_ITER) * 4.0;
    printf("The result is %f\n", piEstimation);
}
/**
* Return a timestamp with double percision.
*/
/**
 * Wall-clock timestamp in seconds, with microsecond resolution folded into
 * the fractional part.
 */
double cpuSecond() {
    struct timeval now;
    gettimeofday(&now, NULL);
    double seconds = (double) now.tv_sec;
    double micros = (double) now.tv_usec;
    return seconds + micros * 1.e-6;
}
int main() {
    // Allocate space for curandState for each thread
    curandState *randState;
    cudaMalloc(&randState, NUM_THREADS * sizeof(curandState));
    // Allocate space to keep track of counts of points generated in the circle
    unsigned int *deviceCounts;
    cudaMalloc(&deviceCounts, NUM_THREADS * sizeof(unsigned int));
    // Allocate space to copy the GPU result back to the CPU
    unsigned int *hostCounts = (unsigned int*) malloc(
        NUM_THREADS * sizeof(unsigned int));
    // Set all of the memory to 0
    cudaMemset(deviceCounts, 0, NUM_THREADS * sizeof(unsigned int));
    double startTime = cpuSecond();
    // Launch the kernel: enough TPB-sized blocks to cover NUM_THREADS threads
    estimatePiKernel <<<(NUM_THREADS + TPB - 1) / TPB, TPB>>> (
        deviceCounts, NUM_ITER / NUM_THREADS, NUM_THREADS, randState);
    // Surface launch-configuration errors immediately; the launch itself
    // returns no status
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess) {
        printf("Kernel launch failed: %s\n", cudaGetErrorString(launchErr));
    }
    // Wait for the kernel to finish
    cudaDeviceSynchronize();
    printf("Total time: %f\n", cpuSecond() - startTime);
    // Copy GPU counts to the CPU
    cudaMemcpy(
        hostCounts, deviceCounts,
        NUM_THREADS * sizeof(unsigned int), cudaMemcpyDeviceToHost
    );
    // Print pi estimation
    estimatePi(hostCounts);
    // Release device and host allocations (previously leaked)
    cudaFree(deviceCounts);
    cudaFree(randState);
    free(hostCounts);
    return 0;
}
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
// Necessary for random numbers in CUDA
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand.h>
#define NUM_ITER 1000000000
#define TPB 128 // Threads PER block
#define NUM_THREADS 10000 // Total number of threads to execute
/**
* Function which, for each instance of the kernel, generates a random point
* and calculates whether or not it is within a circle.
*
* @param counts Array for each thread to store the total number of
* randomly generate points that were within a circle
* @param numIter Number of iterations / points each thread should make
* @param numThreads Number of threads that should be doing work
* @param curandState Array for each thread to store its own curandState
* structure
*/
__global__ void estimatePiKernel(unsigned int *counts, unsigned int numIter,
                                 unsigned int numThreads,
                                 hiprandState *randState) {
    double x, y, distance2;
    // Unique ID of the current thread to determine what work to compute
    unsigned int threadId = blockIdx.x * blockDim.x + threadIdx.x;
    // Bounds guard: `counts` and `randState` hold exactly `numThreads`
    // entries, so threadId == numThreads must exit as well. The original
    // used `>`, an off-by-one that let one extra thread write past the end
    // of both arrays.
    if (threadId >= numThreads) return;
    // Use threadId as the seed of randomness so that every thread is
    // generating different random values
    unsigned int seed = threadId;
    hiprand_init(threadId, seed, 0, &randState[threadId]);
    // `iter` is unsigned to match numIter (avoids signed/unsigned compare)
    for (unsigned int iter = 0; iter < numIter; iter++) {
        // Generate random x, y coordinates from 0.0 (exclusive) to 1.0
        // (inclusive) for a point
        x = (double) hiprand_uniform(&randState[threadId]);
        y = (double) hiprand_uniform(&randState[threadId]);
        // Squared distance from the origin. Comparing it against 1.0 is
        // equivalent to comparing sqrt(x*x + y*y) against 1.0 (sqrt is
        // monotonic) and saves a sqrt per iteration.
        distance2 = (x * x) + (y * y);
        // The point lies inside the radius-1 circle: count it for this thread
        if (distance2 <= 1.0) counts[threadId]++;
    }
}
/**
* Tally up the counts in an array indicating the number of randomly generated
* points that were inside a circle and estimate pi.
*
* @param counts Array of counts of points generate inside a circle
*/
/**
 * Tally up the per-thread counts of randomly generated points that landed
 * inside the circle and print the resulting estimate of pi.
 *
 * @param counts Host array of NUM_THREADS per-thread in-circle counts
 */
void estimatePi(unsigned int *counts) {
    unsigned int totalCount = 0;
    // Accumulate the counts of points in the circle into totalCount
    for (int index = 0; index < NUM_THREADS; index++) {
        totalCount += counts[index];
    }
    // %u matches the unsigned operand (%d was undefined for values > INT_MAX)
    printf("total count: %u\n", totalCount);
    // Calculate pi according to the formula P(point in circle) * 4 where
    // P(point in circle) is (points in circle) / (total points)
    double piEstimation = ((double) totalCount / (double) NUM_ITER) * 4.0;
    printf("The result is %f\n", piEstimation);
}
/**
* Return a timestamp with double percision.
*/
/**
 * Wall-clock timestamp in seconds, with microsecond resolution folded into
 * the fractional part.
 */
double cpuSecond() {
    struct timeval now;
    gettimeofday(&now, NULL);
    double seconds = (double) now.tv_sec;
    double micros = (double) now.tv_usec;
    return seconds + micros * 1.e-6;
}
int main() {
    // Allocate space for hiprandState for each thread
    hiprandState *randState;
    hipMalloc(&randState, NUM_THREADS * sizeof(hiprandState));
    // Allocate space to keep track of counts of points generated in the circle
    unsigned int *deviceCounts;
    hipMalloc(&deviceCounts, NUM_THREADS * sizeof(unsigned int));
    // Allocate space to copy the GPU result back to the CPU
    unsigned int *hostCounts = (unsigned int*) malloc(
        NUM_THREADS * sizeof(unsigned int));
    // Set all of the memory to 0
    hipMemset(deviceCounts, 0, NUM_THREADS * sizeof(unsigned int));
    double startTime = cpuSecond();
    // Launch the kernel: enough TPB-sized blocks to cover NUM_THREADS threads
    estimatePiKernel <<<(NUM_THREADS + TPB - 1) / TPB, TPB>>> (
        deviceCounts, NUM_ITER / NUM_THREADS, NUM_THREADS, randState);
    // Surface launch-configuration errors immediately; the launch itself
    // returns no status
    hipError_t launchErr = hipGetLastError();
    if (launchErr != hipSuccess) {
        printf("Kernel launch failed: %s\n", hipGetErrorString(launchErr));
    }
    // Wait for the kernel to finish
    hipDeviceSynchronize();
    printf("Total time: %f\n", cpuSecond() - startTime);
    // Copy GPU counts to the CPU
    hipMemcpy(
        hostCounts, deviceCounts,
        NUM_THREADS * sizeof(unsigned int), hipMemcpyDeviceToHost
    );
    // Print pi estimation
    estimatePi(hostCounts);
    // Release device and host allocations (previously leaked)
    hipFree(deviceCounts);
    hipFree(randState);
    free(hostCounts);
    return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
// Necessary for random numbers in CUDA
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand.h>
#define NUM_ITER 1000000000
#define TPB 128 // Threads PER block
#define NUM_THREADS 10000 // Total number of threads to execute
/**
* Function which, for each instance of the kernel, generates a random point
* and calculates whether or not it is within a circle.
*
* @param counts Array for each thread to store the total number of
* randomly generate points that were within a circle
* @param numIter Number of iterations / points each thread should make
* @param numThreads Number of threads that should be doing work
* @param curandState Array for each thread to store its own curandState
* structure
*/
__global__ void estimatePiKernel(unsigned int *counts, unsigned int numIter,
                                 unsigned int numThreads,
                                 hiprandState *randState) {
    double x, y, distance2;
    // Unique ID of the current thread to determine what work to compute
    unsigned int threadId = blockIdx.x * blockDim.x + threadIdx.x;
    // Bounds guard: `counts` and `randState` hold exactly `numThreads`
    // entries, so threadId == numThreads must exit as well. The original
    // used `>`, an off-by-one that let one extra thread write past the end
    // of both arrays.
    if (threadId >= numThreads) return;
    // Use threadId as the seed of randomness so that every thread is
    // generating different random values
    unsigned int seed = threadId;
    hiprand_init(threadId, seed, 0, &randState[threadId]);
    // `iter` is unsigned to match numIter (avoids signed/unsigned compare)
    for (unsigned int iter = 0; iter < numIter; iter++) {
        // Generate random x, y coordinates from 0.0 (exclusive) to 1.0
        // (inclusive) for a point
        x = (double) hiprand_uniform(&randState[threadId]);
        y = (double) hiprand_uniform(&randState[threadId]);
        // Squared distance from the origin. Comparing it against 1.0 is
        // equivalent to comparing sqrt(x*x + y*y) against 1.0 (sqrt is
        // monotonic) and saves a sqrt per iteration.
        distance2 = (x * x) + (y * y);
        // The point lies inside the radius-1 circle: count it for this thread
        if (distance2 <= 1.0) counts[threadId]++;
    }
}
/**
* Tally up the counts in an array indicating the number of randomly generated
* points that were inside a circle and estimate pi.
*
* @param counts Array of counts of points generate inside a circle
*/
/**
 * Tally up the per-thread counts of randomly generated points that landed
 * inside the circle and print the resulting estimate of pi.
 *
 * @param counts Host array of NUM_THREADS per-thread in-circle counts
 */
void estimatePi(unsigned int *counts) {
    unsigned int totalCount = 0;
    // Accumulate the counts of points in the circle into totalCount
    for (int index = 0; index < NUM_THREADS; index++) {
        totalCount += counts[index];
    }
    // %u matches the unsigned operand (%d was undefined for values > INT_MAX)
    printf("total count: %u\n", totalCount);
    // Calculate pi according to the formula P(point in circle) * 4 where
    // P(point in circle) is (points in circle) / (total points)
    double piEstimation = ((double) totalCount / (double) NUM_ITER) * 4.0;
    printf("The result is %f\n", piEstimation);
}
/**
* Return a timestamp with double percision.
*/
/**
 * Wall-clock timestamp in seconds, with microsecond resolution folded into
 * the fractional part.
 */
double cpuSecond() {
    struct timeval now;
    gettimeofday(&now, NULL);
    double seconds = (double) now.tv_sec;
    double micros = (double) now.tv_usec;
    return seconds + micros * 1.e-6;
}
int main() {
    // Allocate space for hiprandState for each thread
    hiprandState *randState;
    hipMalloc(&randState, NUM_THREADS * sizeof(hiprandState));
    // Allocate space to keep track of counts of points generated in the circle
    unsigned int *deviceCounts;
    hipMalloc(&deviceCounts, NUM_THREADS * sizeof(unsigned int));
    // Allocate space to copy the GPU result back to the CPU
    unsigned int *hostCounts = (unsigned int*) malloc(
        NUM_THREADS * sizeof(unsigned int));
    // Set all of the memory to 0
    hipMemset(deviceCounts, 0, NUM_THREADS * sizeof(unsigned int));
    double startTime = cpuSecond();
    // Launch the kernel: enough TPB-sized blocks to cover NUM_THREADS threads
    estimatePiKernel <<<(NUM_THREADS + TPB - 1) / TPB, TPB>>> (
        deviceCounts, NUM_ITER / NUM_THREADS, NUM_THREADS, randState);
    // Surface launch-configuration errors immediately; the launch itself
    // returns no status
    hipError_t launchErr = hipGetLastError();
    if (launchErr != hipSuccess) {
        printf("Kernel launch failed: %s\n", hipGetErrorString(launchErr));
    }
    // Wait for the kernel to finish
    hipDeviceSynchronize();
    printf("Total time: %f\n", cpuSecond() - startTime);
    // Copy GPU counts to the CPU
    hipMemcpy(
        hostCounts, deviceCounts,
        NUM_THREADS * sizeof(unsigned int), hipMemcpyDeviceToHost
    );
    // Print pi estimation
    estimatePi(hostCounts);
    // Release device and host allocations (previously leaked)
    hipFree(deviceCounts);
    hipFree(randState);
    free(hostCounts);
    return 0;
}
.file "exercise_bonus.hip"
.globl _Z31__device_stub__estimatePiKernelPjjjP12hiprandState # -- Begin function _Z31__device_stub__estimatePiKernelPjjjP12hiprandState
.p2align 4, 0x90
.type _Z31__device_stub__estimatePiKernelPjjjP12hiprandState,@function
_Z31__device_stub__estimatePiKernelPjjjP12hiprandState: # @_Z31__device_stub__estimatePiKernelPjjjP12hiprandState
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movl %esi, 12(%rsp)
movl %edx, 8(%rsp)
movq %rcx, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 12(%rsp), %rax
movq %rax, 88(%rsp)
leaq 8(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16estimatePiKernelPjjjP12hiprandState, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z31__device_stub__estimatePiKernelPjjjP12hiprandState, .Lfunc_end0-_Z31__device_stub__estimatePiKernelPjjjP12hiprandState
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z10estimatePiPj
.LCPI1_0:
.quad 0x41cdcd6500000000 # double 1.0E+9
.LCPI1_1:
.quad 0x4010000000000000 # double 4
.text
.globl _Z10estimatePiPj
.p2align 4, 0x90
.type _Z10estimatePiPj,@function
_Z10estimatePiPj: # @_Z10estimatePiPj
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
xorl %eax, %eax
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
addl (%rdi,%rax,4), %ebx
incq %rax
cmpq $10000, %rax # imm = 0x2710
jne .LBB1_1
# %bb.2:
movl $.L.str, %edi
movl %ebx, %esi
xorl %eax, %eax
callq printf
movl %ebx, %eax
cvtsi2sd %rax, %xmm0
divsd .LCPI1_0(%rip), %xmm0
mulsd .LCPI1_1(%rip), %xmm0
movl $.L.str.1, %edi
movb $1, %al
popq %rbx
.cfi_def_cfa_offset 8
jmp printf # TAILCALL
.Lfunc_end1:
.size _Z10estimatePiPj, .Lfunc_end1-_Z10estimatePiPj
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z9cpuSecondv
.LCPI2_0:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl _Z9cpuSecondv
.p2align 4, 0x90
.type _Z9cpuSecondv,@function
_Z9cpuSecondv: # @_Z9cpuSecondv
.cfi_startproc
# %bb.0:
subq $24, %rsp
.cfi_def_cfa_offset 32
leaq 8(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
cvtsi2sdq 8(%rsp), %xmm1
cvtsi2sdq 16(%rsp), %xmm0
mulsd .LCPI2_0(%rip), %xmm0
addsd %xmm1, %xmm0
addq $24, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z9cpuSecondv, .Lfunc_end2-_Z9cpuSecondv
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI3_0:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.LCPI3_1:
.quad 0x41cdcd6500000000 # double 1.0E+9
.LCPI3_2:
.quad 0x4010000000000000 # double 4
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $136, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
leaq 64(%rsp), %rdi
movl $480000, %esi # imm = 0x75300
callq hipMalloc
movq %rsp, %rdi
movl $40000, %esi # imm = 0x9C40
callq hipMalloc
movl $40000, %edi # imm = 0x9C40
callq malloc
movq %rax, %r14
movq (%rsp), %rdi
xorl %ebx, %ebx
movl $40000, %edx # imm = 0x9C40
xorl %esi, %esi
callq hipMemset
leaq 16(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
cvtsi2sdq 16(%rsp), %xmm0
cvtsi2sdq 24(%rsp), %xmm1
mulsd .LCPI3_0(%rip), %xmm1
addsd %xmm0, %xmm1
movsd %xmm1, 56(%rsp) # 8-byte Spill
movabsq $4294967375, %rdi # imm = 0x10000004F
leaq 49(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_2
# %bb.1:
movq (%rsp), %rax
movq 64(%rsp), %rcx
movq %rax, 128(%rsp)
movl $100000, 12(%rsp) # imm = 0x186A0
movl $10000, 8(%rsp) # imm = 0x2710
movq %rcx, 120(%rsp)
leaq 128(%rsp), %rax
movq %rax, 16(%rsp)
leaq 12(%rsp), %rax
movq %rax, 24(%rsp)
leaq 8(%rsp), %rax
movq %rax, 32(%rsp)
leaq 120(%rsp), %rax
movq %rax, 40(%rsp)
leaq 104(%rsp), %rdi
leaq 88(%rsp), %rsi
leaq 80(%rsp), %rdx
leaq 72(%rsp), %rcx
callq __hipPopCallConfiguration
movq 104(%rsp), %rsi
movl 112(%rsp), %edx
movq 88(%rsp), %rcx
movl 96(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z16estimatePiKernelPjjjP12hiprandState, %edi
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_2:
callq hipDeviceSynchronize
leaq 16(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
xorps %xmm1, %xmm1
cvtsi2sdq 16(%rsp), %xmm1
xorps %xmm0, %xmm0
cvtsi2sdq 24(%rsp), %xmm0
mulsd .LCPI3_0(%rip), %xmm0
addsd %xmm1, %xmm0
subsd 56(%rsp), %xmm0 # 8-byte Folded Reload
movl $.L.str.2, %edi
movb $1, %al
callq printf
movq (%rsp), %rsi
movl $40000, %edx # imm = 0x9C40
movq %r14, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %eax, %eax
.p2align 4, 0x90
.LBB3_3: # =>This Inner Loop Header: Depth=1
addl (%r14,%rax,4), %ebx
incq %rax
cmpq $10000, %rax # imm = 0x2710
jne .LBB3_3
# %bb.4: # %_Z10estimatePiPj.exit
movl $.L.str, %edi
movl %ebx, %esi
xorl %eax, %eax
callq printf
movl %ebx, %eax
xorps %xmm0, %xmm0
cvtsi2sd %rax, %xmm0
divsd .LCPI3_1(%rip), %xmm0
mulsd .LCPI3_2(%rip), %xmm0
movl $.L.str.1, %edi
movb $1, %al
callq printf
xorl %eax, %eax
addq $136, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16estimatePiKernelPjjjP12hiprandState, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z16estimatePiKernelPjjjP12hiprandState,@object # @_Z16estimatePiKernelPjjjP12hiprandState
.section .rodata,"a",@progbits
.globl _Z16estimatePiKernelPjjjP12hiprandState
.p2align 3, 0x0
_Z16estimatePiKernelPjjjP12hiprandState:
.quad _Z31__device_stub__estimatePiKernelPjjjP12hiprandState
.size _Z16estimatePiKernelPjjjP12hiprandState, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "total count: %d\n"
.size .L.str, 17
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "The result is %f\n"
.size .L.str.1, 18
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Total time: %f\n"
.size .L.str.2, 16
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z16estimatePiKernelPjjjP12hiprandState"
.size .L__unnamed_1, 40
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__estimatePiKernelPjjjP12hiprandState
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z16estimatePiKernelPjjjP12hiprandState
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000f7565_00000000-6_exercise_bonus.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2276:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2276:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "total count: %d\n"
.LC3:
.string "The result is %f\n"
.text
.globl _Z10estimatePiPj
.type _Z10estimatePiPj, @function
_Z10estimatePiPj:
.LFB2271:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq 40000(%rdi), %rdx
movl $0, %eax
.L4:
addl (%rdi), %eax
movl %eax, %ebx
addq $4, %rdi
cmpq %rdx, %rdi
jne .L4
movl %eax, %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebx, %ebx
pxor %xmm0, %xmm0
cvtsi2sdq %rbx, %xmm0
divsd .LC1(%rip), %xmm0
mulsd .LC2(%rip), %xmm0
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2271:
.size _Z10estimatePiPj, .-_Z10estimatePiPj
.globl _Z9cpuSecondv
.type _Z9cpuSecondv, @function
_Z9cpuSecondv:
.LFB2272:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $0, %esi
call gettimeofday@PLT
pxor %xmm0, %xmm0
cvtsi2sdq 8(%rsp), %xmm0
mulsd .LC4(%rip), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq (%rsp), %xmm1
addsd %xmm1, %xmm0
movq 24(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L12:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2272:
.size _Z9cpuSecondv, .-_Z9cpuSecondv
.globl _Z58__device_stub__Z16estimatePiKernelPjjjP17curandStateXORWOWPjjjP17curandStateXORWOW
.type _Z58__device_stub__Z16estimatePiKernelPjjjP17curandStateXORWOWPjjjP17curandStateXORWOW, @function
_Z58__device_stub__Z16estimatePiKernelPjjjP17curandStateXORWOWPjjjP17curandStateXORWOW:
.LFB2298:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movl %esi, 20(%rsp)
movl %edx, 16(%rsp)
movq %rcx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L17
.L13:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L18
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z16estimatePiKernelPjjjP17curandStateXORWOW(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L13
.L18:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2298:
.size _Z58__device_stub__Z16estimatePiKernelPjjjP17curandStateXORWOWPjjjP17curandStateXORWOW, .-_Z58__device_stub__Z16estimatePiKernelPjjjP17curandStateXORWOWPjjjP17curandStateXORWOW
.globl _Z16estimatePiKernelPjjjP17curandStateXORWOW
.type _Z16estimatePiKernelPjjjP17curandStateXORWOW, @function
_Z16estimatePiKernelPjjjP17curandStateXORWOW:
.LFB2299:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z58__device_stub__Z16estimatePiKernelPjjjP17curandStateXORWOWPjjjP17curandStateXORWOW
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2299:
.size _Z16estimatePiKernelPjjjP17curandStateXORWOW, .-_Z16estimatePiKernelPjjjP17curandStateXORWOW
.section .rodata.str1.1
.LC5:
.string "Total time: %f\n"
.text
.globl main
.type main, @function
main:
.LFB2273:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $64, %rsp
.cfi_def_cfa_offset 80
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
leaq 16(%rsp), %rdi
movl $480000, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $40000, %esi
call cudaMalloc@PLT
movl $40000, %edi
call malloc@PLT
movq %rax, %rbx
movl $40000, %edx
movl $0, %esi
movq 24(%rsp), %rdi
call cudaMemset@PLT
call _Z9cpuSecondv
movsd %xmm0, 8(%rsp)
movl $128, 44(%rsp)
movl $1, 48(%rsp)
movl $79, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L25
.L22:
call cudaDeviceSynchronize@PLT
call _Z9cpuSecondv
subsd 8(%rsp), %xmm0
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $2, %ecx
movl $40000, %edx
movq 24(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
movq %rbx, %rdi
call _Z10estimatePiPj
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L26
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
movq 16(%rsp), %rcx
movl $10000, %edx
movl $100000, %esi
movq 24(%rsp), %rdi
call _Z58__device_stub__Z16estimatePiKernelPjjjP17curandStateXORWOWPjjjP17curandStateXORWOW
jmp .L22
.L26:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2273:
.size main, .-main
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC6:
.string "_Z16estimatePiKernelPjjjP17curandStateXORWOW"
.section .rodata.str1.1
.LC7:
.string "precalc_xorwow_matrix"
.LC8:
.string "precalc_xorwow_offset_matrix"
.LC9:
.string "mrg32k3aM1"
.LC10:
.string "mrg32k3aM2"
.LC11:
.string "mrg32k3aM1SubSeq"
.LC12:
.string "mrg32k3aM2SubSeq"
.LC13:
.string "mrg32k3aM1Seq"
.LC14:
.string "mrg32k3aM2Seq"
.LC15:
.string "__cr_lgamma_table"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2301:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z16estimatePiKernelPjjjP17curandStateXORWOW(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $102400, %r9d
movl $0, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _ZL21precalc_xorwow_matrix(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $102400, %r9d
movl $0, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _ZL28precalc_xorwow_offset_matrix(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10mrg32k3aM1(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10mrg32k3aM2(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2016, %r9d
movl $0, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _ZL16mrg32k3aM1SubSeq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2016, %r9d
movl $0, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _ZL16mrg32k3aM2SubSeq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13mrg32k3aM1Seq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC14(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13mrg32k3aM2Seq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $72, %r9d
movl $0, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _ZL17__cr_lgamma_table(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2301:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL17__cr_lgamma_table
.comm _ZL17__cr_lgamma_table,72,32
.local _ZL13mrg32k3aM2Seq
.comm _ZL13mrg32k3aM2Seq,2304,32
.local _ZL13mrg32k3aM1Seq
.comm _ZL13mrg32k3aM1Seq,2304,32
.local _ZL16mrg32k3aM2SubSeq
.comm _ZL16mrg32k3aM2SubSeq,2016,32
.local _ZL16mrg32k3aM1SubSeq
.comm _ZL16mrg32k3aM1SubSeq,2016,32
.local _ZL10mrg32k3aM2
.comm _ZL10mrg32k3aM2,2304,32
.local _ZL10mrg32k3aM1
.comm _ZL10mrg32k3aM1,2304,32
.local _ZL28precalc_xorwow_offset_matrix
.comm _ZL28precalc_xorwow_offset_matrix,102400,32
.local _ZL21precalc_xorwow_matrix
.comm _ZL21precalc_xorwow_matrix,102400,32
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC1:
.long 0
.long 1104006501
.align 8
.LC2:
.long 0
.long 1074790400
.align 8
.LC4:
.long -1598689907
.long 1051772663
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "exercise_bonus.hip"
.globl _Z31__device_stub__estimatePiKernelPjjjP12hiprandState # -- Begin function _Z31__device_stub__estimatePiKernelPjjjP12hiprandState
.p2align 4, 0x90
.type _Z31__device_stub__estimatePiKernelPjjjP12hiprandState,@function
_Z31__device_stub__estimatePiKernelPjjjP12hiprandState: # @_Z31__device_stub__estimatePiKernelPjjjP12hiprandState
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movl %esi, 12(%rsp)
movl %edx, 8(%rsp)
movq %rcx, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 12(%rsp), %rax
movq %rax, 88(%rsp)
leaq 8(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16estimatePiKernelPjjjP12hiprandState, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z31__device_stub__estimatePiKernelPjjjP12hiprandState, .Lfunc_end0-_Z31__device_stub__estimatePiKernelPjjjP12hiprandState
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z10estimatePiPj
.LCPI1_0:
.quad 0x41cdcd6500000000 # double 1.0E+9
.LCPI1_1:
.quad 0x4010000000000000 # double 4
.text
.globl _Z10estimatePiPj
.p2align 4, 0x90
.type _Z10estimatePiPj,@function
_Z10estimatePiPj: # @_Z10estimatePiPj
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
xorl %eax, %eax
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
addl (%rdi,%rax,4), %ebx
incq %rax
cmpq $10000, %rax # imm = 0x2710
jne .LBB1_1
# %bb.2:
movl $.L.str, %edi
movl %ebx, %esi
xorl %eax, %eax
callq printf
movl %ebx, %eax
cvtsi2sd %rax, %xmm0
divsd .LCPI1_0(%rip), %xmm0
mulsd .LCPI1_1(%rip), %xmm0
movl $.L.str.1, %edi
movb $1, %al
popq %rbx
.cfi_def_cfa_offset 8
jmp printf # TAILCALL
.Lfunc_end1:
.size _Z10estimatePiPj, .Lfunc_end1-_Z10estimatePiPj
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z9cpuSecondv
.LCPI2_0:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl _Z9cpuSecondv
.p2align 4, 0x90
.type _Z9cpuSecondv,@function
_Z9cpuSecondv: # @_Z9cpuSecondv
.cfi_startproc
# %bb.0:
subq $24, %rsp
.cfi_def_cfa_offset 32
leaq 8(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
cvtsi2sdq 8(%rsp), %xmm1
cvtsi2sdq 16(%rsp), %xmm0
mulsd .LCPI2_0(%rip), %xmm0
addsd %xmm1, %xmm0
addq $24, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z9cpuSecondv, .Lfunc_end2-_Z9cpuSecondv
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI3_0:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.LCPI3_1:
.quad 0x41cdcd6500000000 # double 1.0E+9
.LCPI3_2:
.quad 0x4010000000000000 # double 4
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $136, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
leaq 64(%rsp), %rdi
movl $480000, %esi # imm = 0x75300
callq hipMalloc
movq %rsp, %rdi
movl $40000, %esi # imm = 0x9C40
callq hipMalloc
movl $40000, %edi # imm = 0x9C40
callq malloc
movq %rax, %r14
movq (%rsp), %rdi
xorl %ebx, %ebx
movl $40000, %edx # imm = 0x9C40
xorl %esi, %esi
callq hipMemset
leaq 16(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
cvtsi2sdq 16(%rsp), %xmm0
cvtsi2sdq 24(%rsp), %xmm1
mulsd .LCPI3_0(%rip), %xmm1
addsd %xmm0, %xmm1
movsd %xmm1, 56(%rsp) # 8-byte Spill
movabsq $4294967375, %rdi # imm = 0x10000004F
leaq 49(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_2
# %bb.1:
movq (%rsp), %rax
movq 64(%rsp), %rcx
movq %rax, 128(%rsp)
movl $100000, 12(%rsp) # imm = 0x186A0
movl $10000, 8(%rsp) # imm = 0x2710
movq %rcx, 120(%rsp)
leaq 128(%rsp), %rax
movq %rax, 16(%rsp)
leaq 12(%rsp), %rax
movq %rax, 24(%rsp)
leaq 8(%rsp), %rax
movq %rax, 32(%rsp)
leaq 120(%rsp), %rax
movq %rax, 40(%rsp)
leaq 104(%rsp), %rdi
leaq 88(%rsp), %rsi
leaq 80(%rsp), %rdx
leaq 72(%rsp), %rcx
callq __hipPopCallConfiguration
movq 104(%rsp), %rsi
movl 112(%rsp), %edx
movq 88(%rsp), %rcx
movl 96(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z16estimatePiKernelPjjjP12hiprandState, %edi
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_2:
callq hipDeviceSynchronize
leaq 16(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
xorps %xmm1, %xmm1
cvtsi2sdq 16(%rsp), %xmm1
xorps %xmm0, %xmm0
cvtsi2sdq 24(%rsp), %xmm0
mulsd .LCPI3_0(%rip), %xmm0
addsd %xmm1, %xmm0
subsd 56(%rsp), %xmm0 # 8-byte Folded Reload
movl $.L.str.2, %edi
movb $1, %al
callq printf
movq (%rsp), %rsi
movl $40000, %edx # imm = 0x9C40
movq %r14, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %eax, %eax
.p2align 4, 0x90
.LBB3_3: # =>This Inner Loop Header: Depth=1
addl (%r14,%rax,4), %ebx
incq %rax
cmpq $10000, %rax # imm = 0x2710
jne .LBB3_3
# %bb.4: # %_Z10estimatePiPj.exit
movl $.L.str, %edi
movl %ebx, %esi
xorl %eax, %eax
callq printf
movl %ebx, %eax
xorps %xmm0, %xmm0
cvtsi2sd %rax, %xmm0
divsd .LCPI3_1(%rip), %xmm0
mulsd .LCPI3_2(%rip), %xmm0
movl $.L.str.1, %edi
movb $1, %al
callq printf
xorl %eax, %eax
addq $136, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16estimatePiKernelPjjjP12hiprandState, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z16estimatePiKernelPjjjP12hiprandState,@object # @_Z16estimatePiKernelPjjjP12hiprandState
.section .rodata,"a",@progbits
.globl _Z16estimatePiKernelPjjjP12hiprandState
.p2align 3, 0x0
_Z16estimatePiKernelPjjjP12hiprandState:
.quad _Z31__device_stub__estimatePiKernelPjjjP12hiprandState
.size _Z16estimatePiKernelPjjjP12hiprandState, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "total count: %d\n"
.size .L.str, 17
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "The result is %f\n"
.size .L.str.1, 18
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Total time: %f\n"
.size .L.str.2, 16
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z16estimatePiKernelPjjjP12hiprandState"
.size .L__unnamed_1, 40
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__estimatePiKernelPjjjP12hiprandState
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z16estimatePiKernelPjjjP12hiprandState
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cstdlib>
#include <iostream>
#include <stdio.h>
#include <cmath>
#include <cuda_runtime.h>
#include <time.h>
#include "device_launch_parameters.h"
#include "cuda.h"
using namespace std;
#define Nn 4096
#define Ni 25088
#define BATCH_SIZE 16
#define BLOCK_SIZE 32
#define BlockSize2D 16
#define VTYPE float
/*
* synapse (w) is (Nn x Ni)^T
* neuron_i (x) is (BATCH_SIZE x Ni)
* neuron_n (y) is (BATCH_SIZE x Nn)
*
* y = Xw^T
*/
void init_layer(VTYPE* h_neuron_i, VTYPE* h_neuron_n, VTYPE* synapse) {
for (int i = 0; i < Nn; i++) {
h_neuron_n[i] = rand() / (VTYPE)RAND_MAX;
}
for (int i = 0; i < Ni * BATCH_SIZE; i++) {
h_neuron_i[i] = rand() / (VTYPE)RAND_MAX;
}
for (int i = 0; i < Ni * Nn; i++) {
synapse[i] = rand() / (VTYPE)RAND_MAX;
}
}
__launch_bounds__(1024,2)
__global__ void d_MatMul_simple1(const VTYPE* d_neuron_i, VTYPE* d_neuron_n, const VTYPE* synapse) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
VTYPE temp = 0.0f;
if (col < Nn && row < BATCH_SIZE) {
#pragma unroll
for (int i = 0; i < Ni; i++) {
temp += d_neuron_i[row * Ni + i] * synapse[col + Nn * i];
}
d_neuron_n[row * Nn + col] = temp;
}
}
__global__ void d_MatMul_simple2(const VTYPE* d_neuron_i, VTYPE* d_neuron_n, const VTYPE* d_synapse) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ VTYPE neuron_i[BlockSize2D][BlockSize2D];
VTYPE temp = 0.0f;
#pragma unroll
for (int i = 0; i < Ni; i += BlockSize2D) {
if (row < BATCH_SIZE && i + threadIdx.x < Ni) {
neuron_i[threadIdx.y][threadIdx.x] = d_neuron_i[row * Ni + i + threadIdx.x];
}
else {
neuron_i[threadIdx.y][threadIdx.x] = 0.0f;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BlockSize2D; j++) {
temp += neuron_i[threadIdx.y][j] * d_synapse[(j + i)* Nn + col];
}
__syncthreads();
}
if (col < Nn && row < BATCH_SIZE) {
d_neuron_n[row * Nn + col] = temp;
}
}
__global__ void d_MatMul_simple3(const VTYPE* d_neuron_i, VTYPE* d_neuron_n, const VTYPE* d_synapse) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ VTYPE synapse[BlockSize2D][BlockSize2D];
__shared__ VTYPE neuron[BlockSize2D][BlockSize2D];
// MxK = MxN * NxK
VTYPE temp = 0.0f;
#pragma unroll
for (int i = 0; i < (Ni - 1) / BlockSize2D + 1; i++) {
if (row < BATCH_SIZE && i * BlockSize2D + threadIdx.x < Ni) {
neuron[threadIdx.y][threadIdx.x] = d_neuron_i[row * Ni + i * BlockSize2D + threadIdx.x];
}
else {
neuron[threadIdx.y][threadIdx.x] = 0.0f;
}
if (i * BlockSize2D + threadIdx.y < Ni && col < Nn) {
synapse[threadIdx.y][threadIdx.x] = d_synapse[(i * BlockSize2D + threadIdx.y) * Nn + col];
}
else {
synapse[threadIdx.y][threadIdx.x] = 0.0f;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BlockSize2D; j++) {
temp += neuron[threadIdx.y][j] * synapse[j][threadIdx.x];
}
__syncthreads();
}
if (row < BATCH_SIZE && col < Nn) {
d_neuron_n[row * Nn + col] = temp;
}
}
__global__ void d_test(VTYPE* d_synapse, VTYPE* d_neuron_i) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
d_neuron_i[idx] *= 1.1f;
}
bool compare(VTYPE* neuron1, VTYPE* neuron2) {
bool good = true;
#pragma unroll
for (int k = 0; k < BATCH_SIZE; k++) {
#pragma unroll
for (int i = 0; i < Nn; i++) {
if (fabs(neuron1[k * Nn + i] - neuron2[k * Nn + i]) > 1e-2)
{
good = false;
printf("At index (%d, %d) \t Host result: %lf \t Device result: %lf \n", k, i, neuron1[k * Nn + i], neuron2[k * Nn + i]);
}
}
}
return good;
}
int main()
{
// Initialize arrays on host
VTYPE* h_neuron_i = (VTYPE*)malloc(Ni * BATCH_SIZE *sizeof(VTYPE));
VTYPE* h_neuron_n1 = (VTYPE*)malloc(Nn * BATCH_SIZE *sizeof(VTYPE));
VTYPE* h_synapse = (VTYPE*)malloc(Nn * Ni * sizeof(VTYPE));
VTYPE* h_neuron_n2 = (VTYPE*)malloc(Nn * BATCH_SIZE * sizeof(VTYPE));
VTYPE* h_neuron_n3 = (VTYPE*)malloc(Nn * BATCH_SIZE * sizeof(VTYPE));
VTYPE* h_neuron_n = (VTYPE*)malloc(Nn * BATCH_SIZE * sizeof(VTYPE));
init_layer(h_neuron_i, h_neuron_n, h_synapse);
// Allocate memory on device
VTYPE* d_neuron_i = NULL;
VTYPE* d_neuron_n1 = NULL;
VTYPE* d_neuron_n2 = NULL;
VTYPE* d_neuron_n3 = NULL;
VTYPE* d_synapse = NULL;
VTYPE* test_var = NULL;
cudaMalloc((void**)&d_neuron_i, Ni * BATCH_SIZE * sizeof(VTYPE));
cudaMalloc((void**)&d_neuron_n1, Nn * BATCH_SIZE * sizeof(VTYPE));
cudaMalloc((void**)&d_neuron_n2, Nn * BATCH_SIZE * sizeof(VTYPE));
cudaMalloc((void**)&d_neuron_n3, Nn * BATCH_SIZE * sizeof(VTYPE));
cudaMalloc((void**)&d_synapse, Nn * Ni * sizeof(VTYPE));
cudaMalloc((void**)&test_var, sizeof(VTYPE));
// Copy arrays from host to device
cudaMemcpy(d_neuron_i, h_neuron_i, Ni * BATCH_SIZE * sizeof(VTYPE), cudaMemcpyHostToDevice);
cout << "Copy from Host to Device: " << cudaGetErrorString(cudaGetLastError()) << endl;
cudaMemcpy(d_synapse, h_synapse, Nn * Ni * sizeof(VTYPE), cudaMemcpyHostToDevice);
cout << "Copy from Host to Device: " << cudaGetErrorString(cudaGetLastError()) << endl;
//Define kernel launch parameters
dim3 ThreadsPerBlock2D = dim3(BlockSize2D, BlockSize2D);
dim3 BlocksPerGrid2D = dim3((Nn + BlockSize2D - 1) / BlockSize2D, (BATCH_SIZE + BlockSize2D - 1) / BlockSize2D);
//Launch kernel #1#
d_MatMul_simple1<<<BlocksPerGrid2D, ThreadsPerBlock2D>>>(d_neuron_i, d_neuron_n1, d_synapse);
cout << "MatMul_simple1: " << cudaGetErrorString(cudaGetLastError()) << endl;
// Copy results from device back to host
cudaMemcpy(h_neuron_n1, d_neuron_n1, Nn * BATCH_SIZE * sizeof(VTYPE), cudaMemcpyDeviceToHost);
cout << "Copy from Device to Host: " << cudaGetErrorString(cudaGetLastError()) << endl;
//Launch kernel #2#
d_MatMul_simple2<<<BlocksPerGrid2D, ThreadsPerBlock2D >>>(d_neuron_i, d_neuron_n2, d_synapse);
cout << "MatMul_simple1: " << cudaGetErrorString(cudaGetLastError()) << endl;
// Copy results from device back to host
cudaMemcpy(h_neuron_n2, d_neuron_n2, Nn * BATCH_SIZE * sizeof(VTYPE), cudaMemcpyDeviceToHost);
cout << "Copy from Device to Host: " << cudaGetErrorString(cudaGetLastError()) << endl;
//Launch kernel #3#
d_MatMul_simple3<<<BlocksPerGrid2D, ThreadsPerBlock2D >>>(d_neuron_i, d_neuron_n3, d_synapse);
cout << "MatMul_simple1: " << cudaGetErrorString(cudaGetLastError()) << endl;
// Copy results from device back to host
cudaMemcpy(h_neuron_n3, d_neuron_n3, Nn * BATCH_SIZE * sizeof(VTYPE), cudaMemcpyDeviceToHost);
cout << "Copy from Device to Host: " << cudaGetErrorString(cudaGetLastError()) << endl;
// Run and time on host
clock_t begin = clock();
#pragma unroll
for (int k = 0; k < BATCH_SIZE; k++) {
#pragma unroll
for (int i = 0; i < Nn; i++) {
VTYPE temp = 0.0f;
#pragma unroll
for (int j = 0; j < Ni; j++) {
temp += h_neuron_i[k * Ni + j] * h_synapse[i + Nn * j];
}
h_neuron_n[k * Nn + i] = temp;
}
/*
* h_neuron_i 16 x 25088
* h_synapse 4096 x 25088
* h_neuron_n 16 x 4096
*/
}
double elapsed = ((double)clock() - (double)begin) / (double)CLOCKS_PER_SEC;
printf("Took CPU %lf seconds to run\n", elapsed);
/*
VTYPE temp = 0.0f;
int k = 1;
int phase = 0;
for (int i = phase * BlockSize2D; i < (phase + 1) * BlockSize2D; i++) {
temp += h_synapse[k * Ni + i];
}
printf("temp in host : %lf\n", temp);
*/
//Compare host and device results
if (compare(h_neuron_n, h_neuron_n1)) {
printf("1 Passed!\n");
}
if (compare(h_neuron_n, h_neuron_n2)) {
printf("2 Passed!\n");
}
if (compare(h_neuron_n, h_neuron_n3)) {
printf("3 Passed!\n");
}
cout << "Host output[0][6:9]: ";
for (int i = 6; i < 9; i++) {
printf("%lf, ", h_neuron_n[i]);
}
cout << endl;
cout << "Kernel1 output[0][6:9]: ";
for (int i = 6; i < 9; i++) {
printf("%lf, ", h_neuron_n1[i]);
}
cout << endl;
cout << "Kernel2 output[0][6:9]: ";
for (int i = 6; i < 9; i++) {
printf("%lf, ", h_neuron_n2[i]);
}
cout << endl;
cout << "Kernel3 output[0][6:9]: ";
for (int i = 6; i < 9; i++) {
printf("%lf, ", h_neuron_n3[i]);
}
cout << endl;
// Free up memory
cudaFree(d_neuron_i);
cudaFree(d_neuron_n1);
cudaFree(d_neuron_n2);
cudaFree(d_neuron_n3);
cudaFree(d_synapse);
cudaFree(test_var);
free(h_neuron_i);
free(h_neuron_n);
free(h_synapse);
free(h_neuron_n1);
free(h_neuron_n2);
free(h_neuron_n3);
cout << "done\n";
return 0;
} | .file "tmpxft_00080c7c_00000000-6_kernel.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3674:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3674:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z10init_layerPfS_S_
.type _Z10init_layerPfS_S_, @function
_Z10init_layerPfS_S_:
.LFB3669:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $8, %rsp
.cfi_def_cfa_offset 48
movq %rdi, %r12
movq %rdx, %rbp
movq %rsi, %rbx
leaq 16384(%rsi), %r13
.L4:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC0(%rip), %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %r13, %rbx
jne .L4
movq %r12, %rbx
addq $1605632, %r12
.L5:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC0(%rip), %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %r12, %rbx
jne .L5
movq %rbp, %rbx
addq $411041792, %rbp
.L6:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC0(%rip), %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L6
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3669:
.size _Z10init_layerPfS_S_, .-_Z10init_layerPfS_S_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC3:
.string "At index (%d, %d) \t Host result: %lf \t Device result: %lf \n"
.text
.globl _Z7comparePfS_
.type _Z7comparePfS_, @function
_Z7comparePfS_:
.LFB3670:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $8, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %r12
movq %rsi, %rbp
movl $0, %r13d
movl $1, %eax
leaq .LC3(%rip), %r15
movl $0, %r14d
jmp .L12
.L13:
addq $1, %rbx
cmpq $4096, %rbx
je .L20
.L15:
movss (%r12,%rbx,4), %xmm0
movss 0(%rbp,%rbx,4), %xmm1
movaps %xmm0, %xmm2
subss %xmm1, %xmm2
andps .LC1(%rip), %xmm2
cvtss2sd %xmm2, %xmm2
comisd .LC2(%rip), %xmm2
jbe .L13
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
movl %ebx, %ecx
movl %r13d, %edx
movq %r15, %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
movl %r14d, %eax
jmp .L13
.L20:
addl $1, %r13d
addq $16384, %r12
addq $16384, %rbp
cmpl $16, %r13d
je .L11
.L12:
movl $0, %ebx
jmp .L15
.L11:
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3670:
.size _Z7comparePfS_, .-_Z7comparePfS_
.globl _Z42__device_stub__Z16d_MatMul_simple1PKfPfS0_PKfPfS0_
.type _Z42__device_stub__Z16d_MatMul_simple1PKfPfS0_PKfPfS0_, @function
_Z42__device_stub__Z16d_MatMul_simple1PKfPfS0_PKfPfS0_:
.LFB3696:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L25
.L21:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L26
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z16d_MatMul_simple1PKfPfS0_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L21
.L26:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3696:
.size _Z42__device_stub__Z16d_MatMul_simple1PKfPfS0_PKfPfS0_, .-_Z42__device_stub__Z16d_MatMul_simple1PKfPfS0_PKfPfS0_
.globl _Z16d_MatMul_simple1PKfPfS0_
.type _Z16d_MatMul_simple1PKfPfS0_, @function
_Z16d_MatMul_simple1PKfPfS0_:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z16d_MatMul_simple1PKfPfS0_PKfPfS0_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _Z16d_MatMul_simple1PKfPfS0_, .-_Z16d_MatMul_simple1PKfPfS0_
.globl _Z42__device_stub__Z16d_MatMul_simple2PKfPfS0_PKfPfS0_
.type _Z42__device_stub__Z16d_MatMul_simple2PKfPfS0_PKfPfS0_, @function
_Z42__device_stub__Z16d_MatMul_simple2PKfPfS0_PKfPfS0_:
.LFB3698:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L33
.L29:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L34
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L33:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z16d_MatMul_simple2PKfPfS0_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L29
.L34:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3698:
.size _Z42__device_stub__Z16d_MatMul_simple2PKfPfS0_PKfPfS0_, .-_Z42__device_stub__Z16d_MatMul_simple2PKfPfS0_PKfPfS0_
.globl _Z16d_MatMul_simple2PKfPfS0_
.type _Z16d_MatMul_simple2PKfPfS0_, @function
_Z16d_MatMul_simple2PKfPfS0_:
.LFB3699:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z16d_MatMul_simple2PKfPfS0_PKfPfS0_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3699:
.size _Z16d_MatMul_simple2PKfPfS0_, .-_Z16d_MatMul_simple2PKfPfS0_
.globl _Z42__device_stub__Z16d_MatMul_simple3PKfPfS0_PKfPfS0_
.type _Z42__device_stub__Z16d_MatMul_simple3PKfPfS0_PKfPfS0_, @function
_Z42__device_stub__Z16d_MatMul_simple3PKfPfS0_PKfPfS0_:
.LFB3700:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L41
.L37:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L42
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L41:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z16d_MatMul_simple3PKfPfS0_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L37
.L42:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3700:
.size _Z42__device_stub__Z16d_MatMul_simple3PKfPfS0_PKfPfS0_, .-_Z42__device_stub__Z16d_MatMul_simple3PKfPfS0_PKfPfS0_
.globl _Z16d_MatMul_simple3PKfPfS0_
.type _Z16d_MatMul_simple3PKfPfS0_, @function
_Z16d_MatMul_simple3PKfPfS0_:
.LFB3701:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z16d_MatMul_simple3PKfPfS0_PKfPfS0_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3701:
.size _Z16d_MatMul_simple3PKfPfS0_, .-_Z16d_MatMul_simple3PKfPfS0_
.section .rodata.str1.1,"aMS",@progbits,1
.LC5:
.string "Copy from Host to Device: "
.LC6:
.string "MatMul_simple1: "
.LC7:
.string "Copy from Device to Host: "
.LC9:
.string "Took CPU %lf seconds to run\n"
.LC10:
.string "1 Passed!\n"
.LC11:
.string "2 Passed!\n"
.LC12:
.string "3 Passed!\n"
.LC13:
.string "Host output[0][6:9]: "
.LC14:
.string "%lf, "
.LC15:
.string "Kernel1 output[0][6:9]: "
.LC16:
.string "Kernel2 output[0][6:9]: "
.LC17:
.string "Kernel3 output[0][6:9]: "
.LC18:
.string "done\n"
.text
.globl main
.type main, @function
main:
.LFB3671:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $104, %rsp
.cfi_def_cfa_offset 160
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
movl $1605632, %edi
call malloc@PLT
movq %rax, %r14
movl $262144, %edi
call malloc@PLT
movq %rax, %r13
movl $411041792, %edi
call malloc@PLT
movq %rax, %r15
movl $262144, %edi
call malloc@PLT
movq %rax, %r12
movl $262144, %edi
call malloc@PLT
movq %rax, %rbp
movl $262144, %edi
call malloc@PLT
movq %rax, %rbx
movq %r15, (%rsp)
movq %r15, %rdx
movq %rax, %rsi
movq %r14, %rdi
call _Z10init_layerPfS_S_
movq $0, 16(%rsp)
movq $0, 24(%rsp)
movq $0, 32(%rsp)
movq $0, 40(%rsp)
movq $0, 48(%rsp)
movq $0, 56(%rsp)
leaq 16(%rsp), %rdi
movl $1605632, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $262144, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $262144, %esi
call cudaMalloc@PLT
leaq 40(%rsp), %rdi
movl $262144, %esi
call cudaMalloc@PLT
leaq 48(%rsp), %rdi
movl $411041792, %esi
call cudaMalloc@PLT
leaq 56(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $1605632, %edx
movq %r14, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
leaq .LC5(%rip), %r15
movq %r15, %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, 8(%rsp)
call cudaGetLastError@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rsi
movq 8(%rsp), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $1, %ecx
movl $411041792, %edx
movq (%rsp), %rsi
movq 48(%rsp), %rdi
call cudaMemcpy@PLT
movq %r15, %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %r15
call cudaGetLastError@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rsi
movq %r15, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $16, 64(%rsp)
movl $16, 68(%rsp)
movl $1, 72(%rsp)
movl $256, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 64(%rsp), %rdx
movl $1, %ecx
movq 76(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L68
.L46:
leaq .LC6(%rip), %rsi
leaq _ZSt4cout(%rip), %r15
movq %r15, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, 8(%rsp)
call cudaGetLastError@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rsi
movq 8(%rsp), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $2, %ecx
movl $262144, %edx
movq 24(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
leaq .LC7(%rip), %rsi
movq %r15, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %r15
call cudaGetLastError@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rsi
movq %r15, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl 72(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 64(%rsp), %rdx
movq 76(%rsp), %rdi
movl 84(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L69
.L47:
leaq .LC6(%rip), %rsi
leaq _ZSt4cout(%rip), %r15
movq %r15, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, 8(%rsp)
call cudaGetLastError@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rsi
movq 8(%rsp), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $2, %ecx
movl $262144, %edx
movq 32(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
leaq .LC7(%rip), %rsi
movq %r15, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %r15
call cudaGetLastError@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rsi
movq %r15, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl 72(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 64(%rsp), %rdx
movq 76(%rsp), %rdi
movl 84(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L70
.L48:
leaq .LC6(%rip), %rsi
leaq _ZSt4cout(%rip), %r15
movq %r15, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, 8(%rsp)
call cudaGetLastError@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rsi
movq 8(%rsp), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $2, %ecx
movl $262144, %edx
movq 40(%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
leaq .LC7(%rip), %rsi
movq %r15, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %r15
call cudaGetLastError@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rsi
movq %r15, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
call clock@PLT
movq %rax, %r15
movl $0, %r9d
.L49:
movq (%rsp), %rax
leaq 411041792(%rax), %rcx
imulq $100352, %r9, %r8
addq %r14, %r8
movq %r9, %rdi
salq $14, %rdi
addq %rbx, %rdi
movl $0, %esi
.L53:
leaq -411041792(%rcx), %rax
movq %r8, %rdx
pxor %xmm1, %xmm1
.L50:
movss (%rdx), %xmm0
mulss (%rax), %xmm0
addss %xmm0, %xmm1
addq $4, %rdx
addq $16384, %rax
cmpq %rcx, %rax
jne .L50
movss %xmm1, (%rdi,%rsi,4)
addq $1, %rsi
addq $4, %rcx
cmpq $4096, %rsi
jne .L53
addq $1, %r9
cmpq $16, %r9
jne .L49
call clock@PLT
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq %r15, %xmm1
subsd %xmm1, %xmm0
divsd .LC8(%rip), %xmm0
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq %r13, %rsi
movq %rbx, %rdi
call _Z7comparePfS_
testb %al, %al
jne .L71
.L54:
movq %r12, %rsi
movq %rbx, %rdi
call _Z7comparePfS_
testb %al, %al
jne .L72
.L55:
movq %rbp, %rsi
movq %rbx, %rdi
call _Z7comparePfS_
testb %al, %al
jne .L73
.L56:
leaq .LC13(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
leaq 24(%rbx), %r15
leaq 36(%rbx), %rax
movq %rax, 8(%rsp)
.L57:
pxor %xmm0, %xmm0
cvtss2sd (%r15), %xmm0
leaq .LC14(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %r15
movq 8(%rsp), %rax
cmpq %rax, %r15
jne .L57
leaq _ZSt4cout(%rip), %r15
movq %r15, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC15(%rip), %rsi
movq %r15, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
leaq 24(%r13), %r15
leaq 36(%r13), %rax
movq %rax, 8(%rsp)
.L58:
pxor %xmm0, %xmm0
cvtss2sd (%r15), %xmm0
leaq .LC14(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %r15
movq 8(%rsp), %rax
cmpq %rax, %r15
jne .L58
leaq _ZSt4cout(%rip), %r15
movq %r15, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC16(%rip), %rsi
movq %r15, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
leaq 24(%r12), %r15
leaq 36(%r12), %rax
movq %rax, 8(%rsp)
.L59:
pxor %xmm0, %xmm0
cvtss2sd (%r15), %xmm0
leaq .LC14(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %r15
cmpq %r15, 8(%rsp)
jne .L59
leaq _ZSt4cout(%rip), %r15
movq %r15, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC17(%rip), %rsi
movq %r15, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
leaq 24(%rbp), %r15
leaq 36(%rbp), %rax
movq %rax, 8(%rsp)
.L60:
pxor %xmm0, %xmm0
cvtss2sd (%r15), %xmm0
leaq .LC14(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %r15
movq 8(%rsp), %rax
cmpq %rax, %r15
jne .L60
leaq _ZSt4cout(%rip), %r15
movq %r15, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq %r14, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq (%rsp), %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
leaq .LC18(%rip), %rsi
movq %r15, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L74
movl $0, %eax
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L68:
.cfi_restore_state
movq 48(%rsp), %rdx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z42__device_stub__Z16d_MatMul_simple1PKfPfS0_PKfPfS0_
jmp .L46
.L69:
movq 48(%rsp), %rdx
movq 32(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z42__device_stub__Z16d_MatMul_simple2PKfPfS0_PKfPfS0_
jmp .L47
.L70:
movq 48(%rsp), %rdx
movq 40(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z42__device_stub__Z16d_MatMul_simple3PKfPfS0_PKfPfS0_
jmp .L48
.L71:
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L54
.L72:
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L55
.L73:
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L56
.L74:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3671:
.size main, .-main
.globl _Z27__device_stub__Z6d_testPfS_PfS_
.type _Z27__device_stub__Z6d_testPfS_PfS_, @function
_Z27__device_stub__Z6d_testPfS_PfS_:
.LFB3702:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L79
.L75:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L80
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L79:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z6d_testPfS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L75
.L80:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3702:
.size _Z27__device_stub__Z6d_testPfS_PfS_, .-_Z27__device_stub__Z6d_testPfS_PfS_
.globl _Z6d_testPfS_
.type _Z6d_testPfS_, @function
_Z6d_testPfS_:
.LFB3703:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z6d_testPfS_PfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3703:
.size _Z6d_testPfS_, .-_Z6d_testPfS_
.section .rodata.str1.1
.LC19:
.string "_Z6d_testPfS_"
.LC20:
.string "_Z16d_MatMul_simple3PKfPfS0_"
.LC21:
.string "_Z16d_MatMul_simple2PKfPfS0_"
.LC22:
.string "_Z16d_MatMul_simple1PKfPfS0_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3705:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC19(%rip), %rdx
movq %rdx, %rcx
leaq _Z6d_testPfS_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC20(%rip), %rdx
movq %rdx, %rcx
leaq _Z16d_MatMul_simple3PKfPfS0_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC21(%rip), %rdx
movq %rdx, %rcx
leaq _Z16d_MatMul_simple2PKfPfS0_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC22(%rip), %rdx
movq %rdx, %rcx
leaq _Z16d_MatMul_simple1PKfPfS0_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3705:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 805306368
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC1:
.long 2147483647
.long 0
.long 0
.long 0
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC2:
.long 1202590843
.long 1065646817
.align 8
.LC8:
.long 0
.long 1093567616
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cstdlib>
#include <iostream>
#include <stdio.h>
#include <cmath>
#include <cuda_runtime.h>
#include <time.h>
#include "device_launch_parameters.h"
#include "cuda.h"
using namespace std;
#define Nn 4096
#define Ni 25088
#define BATCH_SIZE 16
#define BLOCK_SIZE 32
#define BlockSize2D 16
#define VTYPE float
/*
* synapse (w) is (Nn x Ni)^T
* neuron_i (x) is (BATCH_SIZE x Ni)
* neuron_n (y) is (BATCH_SIZE x Nn)
*
* y = Xw^T
*/
// Seed the host buffers with pseudo-random values in [0, 1] (rand()/RAND_MAX).
// NOTE(review): despite its name, h_neuron_n receives only Nn values here;
// in main() it is the CPU reference output buffer and is fully overwritten
// before being read, so this first loop is effectively scratch initialization.
void init_layer(VTYPE* h_neuron_i, VTYPE* h_neuron_n, VTYPE* synapse) {
// Output buffer: first Nn entries only.
for (int i = 0; i < Nn; i++) {
h_neuron_n[i] = rand() / (VTYPE)RAND_MAX;
}
// Input batch: BATCH_SIZE rows of Ni values.
for (int i = 0; i < Ni * BATCH_SIZE; i++) {
h_neuron_i[i] = rand() / (VTYPE)RAND_MAX;
}
// Weight matrix: Nn * Ni values.
for (int i = 0; i < Ni * Nn; i++) {
synapse[i] = rand() / (VTYPE)RAND_MAX;
}
}
// Naive matmul kernel: one thread per output element of the
// (BATCH_SIZE x Nn) result. Each thread dot-products one input row with one
// synapse column; the synapse[col + Nn * i] indexing implies the weights are
// stored Ni-major with row stride Nn, so consecutive threads (consecutive
// col) read consecutive global addresses — coalesced.
// Launch: 2D grid, col = output neuron, row = batch index; out-of-range
// threads are guarded out.
// __launch_bounds__: cap blocks at 1024 threads and request >= 2 resident
// blocks per SM (limits register allocation).
__launch_bounds__(1024,2)
__global__ void d_MatMul_simple1(const VTYPE* d_neuron_i, VTYPE* d_neuron_n, const VTYPE* synapse) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
VTYPE temp = 0.0f;
if (col < Nn && row < BATCH_SIZE) {
#pragma unroll
for (int i = 0; i < Ni; i++) {
temp += d_neuron_i[row * Ni + i] * synapse[col + Nn * i];
}
d_neuron_n[row * Nn + col] = temp;
}
}
// Matmul kernel, variant 2: stages BlockSize2D-wide strips of the input rows
// in shared memory (zero-padded past the edges); the synapse matrix is still
// read directly from global memory each iteration.
// Expects a BlockSize2D x BlockSize2D thread block.
__global__ void d_MatMul_simple2(const VTYPE* d_neuron_i, VTYPE* d_neuron_n, const VTYPE* d_synapse) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ VTYPE neuron_i[BlockSize2D][BlockSize2D];
VTYPE temp = 0.0f;
#pragma unroll
for (int i = 0; i < Ni; i += BlockSize2D) {
// Each thread loads one input element of its row's current strip.
if (row < BATCH_SIZE && i + threadIdx.x < Ni) {
neuron_i[threadIdx.y][threadIdx.x] = d_neuron_i[row * Ni + i + threadIdx.x];
}
else {
neuron_i[threadIdx.y][threadIdx.x] = 0.0f;
}
// Barrier between the shared-memory write above and the reads below.
__syncthreads();
#pragma unroll
for (int j = 0; j < BlockSize2D; j++) {
// NOTE(review): d_synapse is indexed with col without a bounds guard;
// this stays in range only because the launch grid gives col < Nn
// whenever Nn is a multiple of BlockSize2D (true here: 4096 / 16).
temp += neuron_i[threadIdx.y][j] * d_synapse[(j + i)* Nn + col];
}
// Keep the tile intact until every thread has consumed it.
__syncthreads();
}
if (col < Nn && row < BATCH_SIZE) {
d_neuron_n[row * Nn + col] = temp;
}
}
// Matmul kernel, variant 3: classic double-tiled shared-memory matmul.
// Both an input tile and a synapse tile are staged in shared memory per
// iteration, with zero padding for out-of-range loads, so this variant is
// correct for arbitrary Ni/Nn (ceil-div trip count, guarded loads/stores).
// Expects a BlockSize2D x BlockSize2D thread block.
__global__ void d_MatMul_simple3(const VTYPE* d_neuron_i, VTYPE* d_neuron_n, const VTYPE* d_synapse) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ VTYPE synapse[BlockSize2D][BlockSize2D];
__shared__ VTYPE neuron[BlockSize2D][BlockSize2D];
// MxK = MxN * NxK
VTYPE temp = 0.0f;
#pragma unroll
for (int i = 0; i < (Ni - 1) / BlockSize2D + 1; i++) {
// Load this thread's element of the input tile (zero if out of range).
if (row < BATCH_SIZE && i * BlockSize2D + threadIdx.x < Ni) {
neuron[threadIdx.y][threadIdx.x] = d_neuron_i[row * Ni + i * BlockSize2D + threadIdx.x];
}
else {
neuron[threadIdx.y][threadIdx.x] = 0.0f;
}
// Load this thread's element of the weight tile (zero if out of range).
if (i * BlockSize2D + threadIdx.y < Ni && col < Nn) {
synapse[threadIdx.y][threadIdx.x] = d_synapse[(i * BlockSize2D + threadIdx.y) * Nn + col];
}
else {
synapse[threadIdx.y][threadIdx.x] = 0.0f;
}
// All tile writes must land before any thread reads the tiles.
__syncthreads();
#pragma unroll
for (int j = 0; j < BlockSize2D; j++) {
temp += neuron[threadIdx.y][j] * synapse[j][threadIdx.x];
}
// Do not overwrite the tiles until every thread is done with them.
__syncthreads();
}
if (row < BATCH_SIZE && col < Nn) {
d_neuron_n[row * Nn + col] = temp;
}
}
// Debug kernel: scales one element of d_neuron_i by 1.1f per thread.
// NOTE(review): d_synapse is unused, there is no bounds guard on idx (the
// grid must exactly cover the array), and this kernel is never launched
// from main() in this file — it appears to be leftover test scaffolding.
__global__ void d_test(VTYPE* d_synapse, VTYPE* d_neuron_i) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
d_neuron_i[idx] *= 1.1f;
}
// Element-wise comparison of two BATCH_SIZE x Nn host result matrices with
// an absolute tolerance of 1e-2. Prints every mismatching element (index,
// host value, device value) and returns true only if all elements agree.
// NOTE(review): #pragma unroll on host loops is ignored by the host
// compiler; it is harmless here.
bool compare(VTYPE* neuron1, VTYPE* neuron2) {
bool good = true;
#pragma unroll
for (int k = 0; k < BATCH_SIZE; k++) {
#pragma unroll
for (int i = 0; i < Nn; i++) {
if (fabs(neuron1[k * Nn + i] - neuron2[k * Nn + i]) > 1e-2)
{
good = false;
printf("At index (%d, %d) \t Host result: %lf \t Device result: %lf \n", k, i, neuron1[k * Nn + i], neuron2[k * Nn + i]);
}
}
}
return good;
}
// Driver: allocates host/device buffers, runs the three matmul kernel
// variants, computes a CPU reference, and cross-checks all three against it.
// NOTE(review): kernel launches are only checked via cudaGetLastError()
// string prints (no sync before the check), and the "MatMul_simple1" log
// label is reused for kernels 2 and 3 (copy-paste in the log strings only).
int main()
{
// Initialize arrays on host
VTYPE* h_neuron_i = (VTYPE*)malloc(Ni * BATCH_SIZE *sizeof(VTYPE));
VTYPE* h_neuron_n1 = (VTYPE*)malloc(Nn * BATCH_SIZE *sizeof(VTYPE));
VTYPE* h_synapse = (VTYPE*)malloc(Nn * Ni * sizeof(VTYPE));
VTYPE* h_neuron_n2 = (VTYPE*)malloc(Nn * BATCH_SIZE * sizeof(VTYPE));
VTYPE* h_neuron_n3 = (VTYPE*)malloc(Nn * BATCH_SIZE * sizeof(VTYPE));
VTYPE* h_neuron_n = (VTYPE*)malloc(Nn * BATCH_SIZE * sizeof(VTYPE));
// NOTE(review): malloc returns are not checked; ~400 MB is requested for
// h_synapse, so a failure here would surface as a crash in init_layer.
init_layer(h_neuron_i, h_neuron_n, h_synapse);
// Allocate memory on device
VTYPE* d_neuron_i = NULL;
VTYPE* d_neuron_n1 = NULL;
VTYPE* d_neuron_n2 = NULL;
VTYPE* d_neuron_n3 = NULL;
VTYPE* d_synapse = NULL;
VTYPE* test_var = NULL;
cudaMalloc((void**)&d_neuron_i, Ni * BATCH_SIZE * sizeof(VTYPE));
cudaMalloc((void**)&d_neuron_n1, Nn * BATCH_SIZE * sizeof(VTYPE));
cudaMalloc((void**)&d_neuron_n2, Nn * BATCH_SIZE * sizeof(VTYPE));
cudaMalloc((void**)&d_neuron_n3, Nn * BATCH_SIZE * sizeof(VTYPE));
cudaMalloc((void**)&d_synapse, Nn * Ni * sizeof(VTYPE));
// test_var is allocated and freed but never otherwise used.
cudaMalloc((void**)&test_var, sizeof(VTYPE));
// Copy arrays from host to device
cudaMemcpy(d_neuron_i, h_neuron_i, Ni * BATCH_SIZE * sizeof(VTYPE), cudaMemcpyHostToDevice);
cout << "Copy from Host to Device: " << cudaGetErrorString(cudaGetLastError()) << endl;
cudaMemcpy(d_synapse, h_synapse, Nn * Ni * sizeof(VTYPE), cudaMemcpyHostToDevice);
cout << "Copy from Host to Device: " << cudaGetErrorString(cudaGetLastError()) << endl;
//Define kernel launch parameters
dim3 ThreadsPerBlock2D = dim3(BlockSize2D, BlockSize2D);
dim3 BlocksPerGrid2D = dim3((Nn + BlockSize2D - 1) / BlockSize2D, (BATCH_SIZE + BlockSize2D - 1) / BlockSize2D);
//Launch kernel #1#
d_MatMul_simple1<<<BlocksPerGrid2D, ThreadsPerBlock2D>>>(d_neuron_i, d_neuron_n1, d_synapse);
cout << "MatMul_simple1: " << cudaGetErrorString(cudaGetLastError()) << endl;
// Copy results from device back to host
// (blocking cudaMemcpy also synchronizes with the preceding kernel)
cudaMemcpy(h_neuron_n1, d_neuron_n1, Nn * BATCH_SIZE * sizeof(VTYPE), cudaMemcpyDeviceToHost);
cout << "Copy from Device to Host: " << cudaGetErrorString(cudaGetLastError()) << endl;
//Launch kernel #2#
d_MatMul_simple2<<<BlocksPerGrid2D, ThreadsPerBlock2D >>>(d_neuron_i, d_neuron_n2, d_synapse);
cout << "MatMul_simple1: " << cudaGetErrorString(cudaGetLastError()) << endl;
// Copy results from device back to host
cudaMemcpy(h_neuron_n2, d_neuron_n2, Nn * BATCH_SIZE * sizeof(VTYPE), cudaMemcpyDeviceToHost);
cout << "Copy from Device to Host: " << cudaGetErrorString(cudaGetLastError()) << endl;
//Launch kernel #3#
d_MatMul_simple3<<<BlocksPerGrid2D, ThreadsPerBlock2D >>>(d_neuron_i, d_neuron_n3, d_synapse);
cout << "MatMul_simple1: " << cudaGetErrorString(cudaGetLastError()) << endl;
// Copy results from device back to host
cudaMemcpy(h_neuron_n3, d_neuron_n3, Nn * BATCH_SIZE * sizeof(VTYPE), cudaMemcpyDeviceToHost);
cout << "Copy from Device to Host: " << cudaGetErrorString(cudaGetLastError()) << endl;
// Run and time on host
clock_t begin = clock();
#pragma unroll
for (int k = 0; k < BATCH_SIZE; k++) {
#pragma unroll
for (int i = 0; i < Nn; i++) {
VTYPE temp = 0.0f;
#pragma unroll
for (int j = 0; j < Ni; j++) {
temp += h_neuron_i[k * Ni + j] * h_synapse[i + Nn * j];
}
h_neuron_n[k * Nn + i] = temp;
}
/*
* h_neuron_i 16 x 25088
* h_synapse 4096 x 25088
* h_neuron_n 16 x 4096
*/
}
double elapsed = ((double)clock() - (double)begin) / (double)CLOCKS_PER_SEC;
printf("Took CPU %lf seconds to run\n", elapsed);
/*
VTYPE temp = 0.0f;
int k = 1;
int phase = 0;
for (int i = phase * BlockSize2D; i < (phase + 1) * BlockSize2D; i++) {
temp += h_synapse[k * Ni + i];
}
printf("temp in host : %lf\n", temp);
*/
//Compare host and device results
if (compare(h_neuron_n, h_neuron_n1)) {
printf("1 Passed!\n");
}
if (compare(h_neuron_n, h_neuron_n2)) {
printf("2 Passed!\n");
}
if (compare(h_neuron_n, h_neuron_n3)) {
printf("3 Passed!\n");
}
// Dump a small sample of each result for eyeball comparison.
cout << "Host output[0][6:9]: ";
for (int i = 6; i < 9; i++) {
printf("%lf, ", h_neuron_n[i]);
}
cout << endl;
cout << "Kernel1 output[0][6:9]: ";
for (int i = 6; i < 9; i++) {
printf("%lf, ", h_neuron_n1[i]);
}
cout << endl;
cout << "Kernel2 output[0][6:9]: ";
for (int i = 6; i < 9; i++) {
printf("%lf, ", h_neuron_n2[i]);
}
cout << endl;
cout << "Kernel3 output[0][6:9]: ";
for (int i = 6; i < 9; i++) {
printf("%lf, ", h_neuron_n3[i]);
}
cout << endl;
// Free up memory
cudaFree(d_neuron_i);
cudaFree(d_neuron_n1);
cudaFree(d_neuron_n2);
cudaFree(d_neuron_n3);
cudaFree(d_synapse);
cudaFree(test_var);
free(h_neuron_i);
free(h_neuron_n);
free(h_synapse);
free(h_neuron_n1);
free(h_neuron_n2);
free(h_neuron_n3);
cout << "done\n";
return 0;
} | #include <cstdlib>
#include <iostream>
#include <stdio.h>
#include <cmath>
#include <hip/hip_runtime.h>
#include <time.h>
using namespace std;
#define Nn 4096
#define Ni 25088
#define BATCH_SIZE 16
#define BLOCK_SIZE 32
#define BlockSize2D 16
#define VTYPE float
/*
* synapse (w) is (Nn x Ni)^T
* neuron_i (x) is (BATCH_SIZE x Ni)
* neuron_n (y) is (BATCH_SIZE x Nn)
*
* y = Xw^T
*/
// HIP port: seed the host buffers with pseudo-random values in [0, 1]
// (rand()/RAND_MAX). Host-only code, byte-identical to the CUDA original.
// NOTE(review): h_neuron_n receives only Nn values and is overwritten by
// the CPU reference computation before being read.
void init_layer(VTYPE* h_neuron_i, VTYPE* h_neuron_n, VTYPE* synapse) {
// Output buffer: first Nn entries only.
for (int i = 0; i < Nn; i++) {
h_neuron_n[i] = rand() / (VTYPE)RAND_MAX;
}
// Input batch: BATCH_SIZE rows of Ni values.
for (int i = 0; i < Ni * BATCH_SIZE; i++) {
h_neuron_i[i] = rand() / (VTYPE)RAND_MAX;
}
// Weight matrix: Nn * Ni values.
for (int i = 0; i < Ni * Nn; i++) {
synapse[i] = rand() / (VTYPE)RAND_MAX;
}
}
// HIP port of the naive matmul kernel: one thread per output element of the
// (BATCH_SIZE x Nn) result; each thread dot-products one input row with one
// synapse column (coalesced via the col-contiguous synapse indexing).
// __launch_bounds__(1024,2) is supported unchanged by HIP (second argument
// is min warps/waves per execution unit on AMD — TODO confirm tuning).
__launch_bounds__(1024,2)
__global__ void d_MatMul_simple1(const VTYPE* d_neuron_i, VTYPE* d_neuron_n, const VTYPE* synapse) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
VTYPE temp = 0.0f;
if (col < Nn && row < BATCH_SIZE) {
#pragma unroll
for (int i = 0; i < Ni; i++) {
temp += d_neuron_i[row * Ni + i] * synapse[col + Nn * i];
}
d_neuron_n[row * Nn + col] = temp;
}
}
__global__ void d_MatMul_simple2(const VTYPE* d_neuron_i, VTYPE* d_neuron_n, const VTYPE* d_synapse) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ VTYPE neuron_i[BlockSize2D][BlockSize2D];
VTYPE temp = 0.0f;
#pragma unroll
for (int i = 0; i < Ni; i += BlockSize2D) {
if (row < BATCH_SIZE && i + threadIdx.x < Ni) {
neuron_i[threadIdx.y][threadIdx.x] = d_neuron_i[row * Ni + i + threadIdx.x];
}
else {
neuron_i[threadIdx.y][threadIdx.x] = 0.0f;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BlockSize2D; j++) {
temp += neuron_i[threadIdx.y][j] * d_synapse[(j + i)* Nn + col];
}
__syncthreads();
}
if (col < Nn && row < BATCH_SIZE) {
d_neuron_n[row * Nn + col] = temp;
}
}
__global__ void d_MatMul_simple3(const VTYPE* d_neuron_i, VTYPE* d_neuron_n, const VTYPE* d_synapse) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ VTYPE synapse[BlockSize2D][BlockSize2D];
__shared__ VTYPE neuron[BlockSize2D][BlockSize2D];
// MxK = MxN * NxK
VTYPE temp = 0.0f;
#pragma unroll
for (int i = 0; i < (Ni - 1) / BlockSize2D + 1; i++) {
if (row < BATCH_SIZE && i * BlockSize2D + threadIdx.x < Ni) {
neuron[threadIdx.y][threadIdx.x] = d_neuron_i[row * Ni + i * BlockSize2D + threadIdx.x];
}
else {
neuron[threadIdx.y][threadIdx.x] = 0.0f;
}
if (i * BlockSize2D + threadIdx.y < Ni && col < Nn) {
synapse[threadIdx.y][threadIdx.x] = d_synapse[(i * BlockSize2D + threadIdx.y) * Nn + col];
}
else {
synapse[threadIdx.y][threadIdx.x] = 0.0f;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BlockSize2D; j++) {
temp += neuron[threadIdx.y][j] * synapse[j][threadIdx.x];
}
__syncthreads();
}
if (row < BATCH_SIZE && col < Nn) {
d_neuron_n[row * Nn + col] = temp;
}
}
__global__ void d_test(VTYPE* d_synapse, VTYPE* d_neuron_i) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
d_neuron_i[idx] *= 1.1f;
}
bool compare(VTYPE* neuron1, VTYPE* neuron2) {
bool good = true;
#pragma unroll
for (int k = 0; k < BATCH_SIZE; k++) {
#pragma unroll
for (int i = 0; i < Nn; i++) {
if (fabs(neuron1[k * Nn + i] - neuron2[k * Nn + i]) > 1e-2)
{
good = false;
printf("At index (%d, %d) \t Host result: %lf \t Device result: %lf \n", k, i, neuron1[k * Nn + i], neuron2[k * Nn + i]);
}
}
}
return good;
}
int main()
{
// Initialize arrays on host
VTYPE* h_neuron_i = (VTYPE*)malloc(Ni * BATCH_SIZE *sizeof(VTYPE));
VTYPE* h_neuron_n1 = (VTYPE*)malloc(Nn * BATCH_SIZE *sizeof(VTYPE));
VTYPE* h_synapse = (VTYPE*)malloc(Nn * Ni * sizeof(VTYPE));
VTYPE* h_neuron_n2 = (VTYPE*)malloc(Nn * BATCH_SIZE * sizeof(VTYPE));
VTYPE* h_neuron_n3 = (VTYPE*)malloc(Nn * BATCH_SIZE * sizeof(VTYPE));
VTYPE* h_neuron_n = (VTYPE*)malloc(Nn * BATCH_SIZE * sizeof(VTYPE));
init_layer(h_neuron_i, h_neuron_n, h_synapse);
// Allocate memory on device
VTYPE* d_neuron_i = NULL;
VTYPE* d_neuron_n1 = NULL;
VTYPE* d_neuron_n2 = NULL;
VTYPE* d_neuron_n3 = NULL;
VTYPE* d_synapse = NULL;
VTYPE* test_var = NULL;
hipMalloc((void**)&d_neuron_i, Ni * BATCH_SIZE * sizeof(VTYPE));
hipMalloc((void**)&d_neuron_n1, Nn * BATCH_SIZE * sizeof(VTYPE));
hipMalloc((void**)&d_neuron_n2, Nn * BATCH_SIZE * sizeof(VTYPE));
hipMalloc((void**)&d_neuron_n3, Nn * BATCH_SIZE * sizeof(VTYPE));
hipMalloc((void**)&d_synapse, Nn * Ni * sizeof(VTYPE));
hipMalloc((void**)&test_var, sizeof(VTYPE));
// Copy arrays from host to device
hipMemcpy(d_neuron_i, h_neuron_i, Ni * BATCH_SIZE * sizeof(VTYPE), hipMemcpyHostToDevice);
cout << "Copy from Host to Device: " << hipGetErrorString(hipGetLastError()) << endl;
hipMemcpy(d_synapse, h_synapse, Nn * Ni * sizeof(VTYPE), hipMemcpyHostToDevice);
cout << "Copy from Host to Device: " << hipGetErrorString(hipGetLastError()) << endl;
//Define kernel launch parameters
dim3 ThreadsPerBlock2D = dim3(BlockSize2D, BlockSize2D);
dim3 BlocksPerGrid2D = dim3((Nn + BlockSize2D - 1) / BlockSize2D, (BATCH_SIZE + BlockSize2D - 1) / BlockSize2D);
//Launch kernel #1#
d_MatMul_simple1<<<BlocksPerGrid2D, ThreadsPerBlock2D>>>(d_neuron_i, d_neuron_n1, d_synapse);
cout << "MatMul_simple1: " << hipGetErrorString(hipGetLastError()) << endl;
// Copy results from device back to host
hipMemcpy(h_neuron_n1, d_neuron_n1, Nn * BATCH_SIZE * sizeof(VTYPE), hipMemcpyDeviceToHost);
cout << "Copy from Device to Host: " << hipGetErrorString(hipGetLastError()) << endl;
//Launch kernel #2#
d_MatMul_simple2<<<BlocksPerGrid2D, ThreadsPerBlock2D >>>(d_neuron_i, d_neuron_n2, d_synapse);
cout << "MatMul_simple1: " << hipGetErrorString(hipGetLastError()) << endl;
// Copy results from device back to host
hipMemcpy(h_neuron_n2, d_neuron_n2, Nn * BATCH_SIZE * sizeof(VTYPE), hipMemcpyDeviceToHost);
cout << "Copy from Device to Host: " << hipGetErrorString(hipGetLastError()) << endl;
//Launch kernel #3#
d_MatMul_simple3<<<BlocksPerGrid2D, ThreadsPerBlock2D >>>(d_neuron_i, d_neuron_n3, d_synapse);
cout << "MatMul_simple1: " << hipGetErrorString(hipGetLastError()) << endl;
// Copy results from device back to host
hipMemcpy(h_neuron_n3, d_neuron_n3, Nn * BATCH_SIZE * sizeof(VTYPE), hipMemcpyDeviceToHost);
cout << "Copy from Device to Host: " << hipGetErrorString(hipGetLastError()) << endl;
// Run and time on host
clock_t begin = clock();
#pragma unroll
for (int k = 0; k < BATCH_SIZE; k++) {
#pragma unroll
for (int i = 0; i < Nn; i++) {
VTYPE temp = 0.0f;
#pragma unroll
for (int j = 0; j < Ni; j++) {
temp += h_neuron_i[k * Ni + j] * h_synapse[i + Nn * j];
}
h_neuron_n[k * Nn + i] = temp;
}
/*
* h_neuron_i 16 x 25088
* h_synapse 4096 x 25088
* h_neuron_n 16 x 4096
*/
}
double elapsed = ((double)clock() - (double)begin) / (double)CLOCKS_PER_SEC;
printf("Took CPU %lf seconds to run\n", elapsed);
/*
VTYPE temp = 0.0f;
int k = 1;
int phase = 0;
for (int i = phase * BlockSize2D; i < (phase + 1) * BlockSize2D; i++) {
temp += h_synapse[k * Ni + i];
}
printf("temp in host : %lf\n", temp);
*/
//Compare host and device results
if (compare(h_neuron_n, h_neuron_n1)) {
printf("1 Passed!\n");
}
if (compare(h_neuron_n, h_neuron_n2)) {
printf("2 Passed!\n");
}
if (compare(h_neuron_n, h_neuron_n3)) {
printf("3 Passed!\n");
}
cout << "Host output[0][6:9]: ";
for (int i = 6; i < 9; i++) {
printf("%lf, ", h_neuron_n[i]);
}
cout << endl;
cout << "Kernel1 output[0][6:9]: ";
for (int i = 6; i < 9; i++) {
printf("%lf, ", h_neuron_n1[i]);
}
cout << endl;
cout << "Kernel2 output[0][6:9]: ";
for (int i = 6; i < 9; i++) {
printf("%lf, ", h_neuron_n2[i]);
}
cout << endl;
cout << "Kernel3 output[0][6:9]: ";
for (int i = 6; i < 9; i++) {
printf("%lf, ", h_neuron_n3[i]);
}
cout << endl;
// Free up memory
hipFree(d_neuron_i);
hipFree(d_neuron_n1);
hipFree(d_neuron_n2);
hipFree(d_neuron_n3);
hipFree(d_synapse);
hipFree(test_var);
free(h_neuron_i);
free(h_neuron_n);
free(h_synapse);
free(h_neuron_n1);
free(h_neuron_n2);
free(h_neuron_n3);
cout << "done\n";
return 0;
} |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void gpu_seqwr_kernel(int *buffer, size_t reps, size_t elements)
{
for(size_t j = 0; j < reps; j++) {
size_t ofs = blockIdx.x * blockDim.x + threadIdx.x;
size_t step = blockDim.x * gridDim.x;
while(ofs < elements) {
buffer[ofs] = 0;
ofs += step;
}
}
} | code for sm_80
Function : _Z16gpu_seqwr_kernelPimm
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ ISETP.NE.U32.AND P0, PT, RZ, c[0x0][0x168], PT ; /* 0x00005a00ff007a0c */
/* 0x000fc80003f05070 */
/*0020*/ ISETP.NE.AND.EX P0, PT, RZ, c[0x0][0x16c], PT, P0 ; /* 0x00005b00ff007a0c */
/* 0x000fda0003f05300 */
/*0030*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0040*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0050*/ IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff097624 */
/* 0x000fe200078e00ff */
/*0060*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0070*/ IMAD.MOV.U32 R6, RZ, RZ, RZ ; /* 0x000000ffff067224 */
/* 0x000fe200078e00ff */
/*0080*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e220000002100 */
/*0090*/ IMAD.MOV.U32 R8, RZ, RZ, RZ ; /* 0x000000ffff087224 */
/* 0x000fe400078e00ff */
/*00a0*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*00b0*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fe20003f06070 */
/*00c0*/ BSSY B0, 0x1f0 ; /* 0x0000012000007945 */
/* 0x000fe20003800000 */
/*00d0*/ IADD3 R6, P1, R6, 0x1, RZ ; /* 0x0000000106067810 */
/* 0x000fe40007f3e0ff */
/*00e0*/ ISETP.GE.U32.AND.EX P0, PT, RZ, c[0x0][0x174], PT, P0 ; /* 0x00005d00ff007a0c */
/* 0x000fc60003f06100 */
/*00f0*/ IMAD.X R8, RZ, RZ, R8, P1 ; /* 0x000000ffff087224 */
/* 0x000fe200008e0608 */
/*0100*/ ISETP.GE.U32.AND P1, PT, R6, c[0x0][0x168], PT ; /* 0x00005a0006007a0c */
/* 0x000fc80003f26070 */
/*0110*/ ISETP.GE.U32.AND.EX P1, PT, R8, c[0x0][0x16c], PT, P1 ; /* 0x00005b0008007a0c */
/* 0x000fca0003f26110 */
/*0120*/ @P0 BRA 0x1e0 ; /* 0x000000b000000947 */
/* 0x000fea0003800000 */
/*0130*/ IMAD.MOV.U32 R4, RZ, RZ, R0 ; /* 0x000000ffff047224 */
/* 0x000fe400078e0000 */
/*0140*/ IMAD.MOV.U32 R5, RZ, RZ, RZ ; /* 0x000000ffff057224 */
/* 0x000fc600078e00ff */
/*0150*/ LEA R2, P0, R4, c[0x0][0x160], 0x2 ; /* 0x0000580004027a11 */
/* 0x000fe200078010ff */
/*0160*/ IMAD R7, R9, c[0x0][0xc], RZ ; /* 0x0000030009077a24 */
/* 0x000fc600078e02ff */
/*0170*/ LEA.HI.X R3, R4, c[0x0][0x164], R5, 0x2, P0 ; /* 0x0000590004037a11 */
/* 0x000fe400000f1405 */
/*0180*/ IADD3 R4, P0, R7, R4, RZ ; /* 0x0000000407047210 */
/* 0x000fc60007f1e0ff */
/*0190*/ STG.E [R2.64], RZ ; /* 0x000000ff02007986 */
/* 0x0001e4000c101904 */
/*01a0*/ IMAD.X R5, RZ, RZ, R5, P0 ; /* 0x000000ffff057224 */
/* 0x000fe200000e0605 */
/*01b0*/ ISETP.GE.U32.AND P0, PT, R4, c[0x0][0x170], PT ; /* 0x00005c0004007a0c */
/* 0x000fc80003f06070 */
/*01c0*/ ISETP.GE.U32.AND.EX P0, PT, R5, c[0x0][0x174], PT, P0 ; /* 0x00005d0005007a0c */
/* 0x000fda0003f06100 */
/*01d0*/ @!P0 BRA 0x150 ; /* 0xffffff7000008947 */
/* 0x001fea000383ffff */
/*01e0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*01f0*/ @!P1 BRA 0xb0 ; /* 0xfffffeb000009947 */
/* 0x000fea000383ffff */
/*0200*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0210*/ BRA 0x210; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void gpu_seqwr_kernel(int *buffer, size_t reps, size_t elements)
{
for(size_t j = 0; j < reps; j++) {
size_t ofs = blockIdx.x * blockDim.x + threadIdx.x;
size_t step = blockDim.x * gridDim.x;
while(ofs < elements) {
buffer[ofs] = 0;
ofs += step;
}
}
} | .file "tmpxft_00017ee3_00000000-6_gpu_seqwr_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z38__device_stub__Z16gpu_seqwr_kernelPimmPimm
.type _Z38__device_stub__Z16gpu_seqwr_kernelPimmPimm, @function
_Z38__device_stub__Z16gpu_seqwr_kernelPimmPimm:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z16gpu_seqwr_kernelPimm(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z38__device_stub__Z16gpu_seqwr_kernelPimmPimm, .-_Z38__device_stub__Z16gpu_seqwr_kernelPimmPimm
.globl _Z16gpu_seqwr_kernelPimm
.type _Z16gpu_seqwr_kernelPimm, @function
_Z16gpu_seqwr_kernelPimm:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z16gpu_seqwr_kernelPimmPimm
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z16gpu_seqwr_kernelPimm, .-_Z16gpu_seqwr_kernelPimm
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z16gpu_seqwr_kernelPimm"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z16gpu_seqwr_kernelPimm(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void gpu_seqwr_kernel(int *buffer, size_t reps, size_t elements)
{
for(size_t j = 0; j < reps; j++) {
size_t ofs = blockIdx.x * blockDim.x + threadIdx.x;
size_t step = blockDim.x * gridDim.x;
while(ofs < elements) {
buffer[ofs] = 0;
ofs += step;
}
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void gpu_seqwr_kernel(int *buffer, size_t reps, size_t elements)
{
for(size_t j = 0; j < reps; j++) {
size_t ofs = blockIdx.x * blockDim.x + threadIdx.x;
size_t step = blockDim.x * gridDim.x;
while(ofs < elements) {
buffer[ofs] = 0;
ofs += step;
}
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void gpu_seqwr_kernel(int *buffer, size_t reps, size_t elements)
{
for(size_t j = 0; j < reps; j++) {
size_t ofs = blockIdx.x * blockDim.x + threadIdx.x;
size_t step = blockDim.x * gridDim.x;
while(ofs < elements) {
buffer[ofs] = 0;
ofs += step;
}
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z16gpu_seqwr_kernelPimm
.globl _Z16gpu_seqwr_kernelPimm
.p2align 8
.type _Z16gpu_seqwr_kernelPimm,@function
_Z16gpu_seqwr_kernelPimm:
s_load_b64 s[2:3], s[0:1], 0x8
s_mov_b64 s[4:5], 0
s_waitcnt lgkmcnt(0)
s_cmp_eq_u64 s[2:3], 0
s_cbranch_scc1 .LBB0_6
s_clause 0x3
s_load_b32 s8, s[0:1], 0x24
s_load_b64 s[6:7], s[0:1], 0x10
s_load_b64 s[10:11], s[0:1], 0x0
s_load_b32 s1, s[0:1], 0x18
s_mov_b32 s9, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s8, s8, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[1:2], null, s15, s8, v[0:1]
v_mov_b32_e32 v2, 0
v_mov_b32_e32 v0, 0
s_mul_i32 s8, s1, s8
v_lshlrev_b64 v[3:4], 2, v[1:2]
v_cmp_gt_u64_e32 vcc_lo, s[6:7], v[1:2]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v3, s0, s10, v3
v_add_co_ci_u32_e64 v4, s0, s11, v4, s0
s_lshl_b64 s[10:11], s[8:9], 2
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_3
.p2align 6
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s12
s_add_u32 s4, s4, 1
s_addc_u32 s5, s5, 0
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_eq_u64 s[4:5], s[2:3]
s_cbranch_scc1 .LBB0_6
.LBB0_3:
s_and_saveexec_b32 s12, vcc_lo
s_cbranch_execz .LBB0_2
v_dual_mov_b32 v6, v4 :: v_dual_mov_b32 v5, v3
v_dual_mov_b32 v8, v2 :: v_dual_mov_b32 v7, v1
s_mov_b32 s13, 0
.LBB0_5:
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v7, s0, v7, s8
v_add_co_ci_u32_e64 v8, s0, s9, v8, s0
global_store_b32 v[5:6], v0, off
v_add_co_u32 v5, s1, v5, s10
v_cmp_le_u64_e64 s0, s[6:7], v[7:8]
v_add_co_ci_u32_e64 v6, s1, s11, v6, s1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_or_b32 s13, s0, s13
s_and_not1_b32 exec_lo, exec_lo, s13
s_cbranch_execnz .LBB0_5
s_branch .LBB0_2
.LBB0_6:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16gpu_seqwr_kernelPimm
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z16gpu_seqwr_kernelPimm, .Lfunc_end0-_Z16gpu_seqwr_kernelPimm
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 8
.value_kind: by_value
- .offset: 16
.size: 8
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16gpu_seqwr_kernelPimm
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z16gpu_seqwr_kernelPimm.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void gpu_seqwr_kernel(int *buffer, size_t reps, size_t elements)
{
for(size_t j = 0; j < reps; j++) {
size_t ofs = blockIdx.x * blockDim.x + threadIdx.x;
size_t step = blockDim.x * gridDim.x;
while(ofs < elements) {
buffer[ofs] = 0;
ofs += step;
}
}
} | .text
.file "gpu_seqwr_kernel.hip"
.globl _Z31__device_stub__gpu_seqwr_kernelPimm # -- Begin function _Z31__device_stub__gpu_seqwr_kernelPimm
.p2align 4, 0x90
.type _Z31__device_stub__gpu_seqwr_kernelPimm,@function
_Z31__device_stub__gpu_seqwr_kernelPimm: # @_Z31__device_stub__gpu_seqwr_kernelPimm
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16gpu_seqwr_kernelPimm, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z31__device_stub__gpu_seqwr_kernelPimm, .Lfunc_end0-_Z31__device_stub__gpu_seqwr_kernelPimm
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16gpu_seqwr_kernelPimm, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z16gpu_seqwr_kernelPimm,@object # @_Z16gpu_seqwr_kernelPimm
.section .rodata,"a",@progbits
.globl _Z16gpu_seqwr_kernelPimm
.p2align 3, 0x0
_Z16gpu_seqwr_kernelPimm:
.quad _Z31__device_stub__gpu_seqwr_kernelPimm
.size _Z16gpu_seqwr_kernelPimm, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z16gpu_seqwr_kernelPimm"
.size .L__unnamed_1, 25
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__gpu_seqwr_kernelPimm
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z16gpu_seqwr_kernelPimm
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z16gpu_seqwr_kernelPimm
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ ISETP.NE.U32.AND P0, PT, RZ, c[0x0][0x168], PT ; /* 0x00005a00ff007a0c */
/* 0x000fc80003f05070 */
/*0020*/ ISETP.NE.AND.EX P0, PT, RZ, c[0x0][0x16c], PT, P0 ; /* 0x00005b00ff007a0c */
/* 0x000fda0003f05300 */
/*0030*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0040*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0050*/ IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff097624 */
/* 0x000fe200078e00ff */
/*0060*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0070*/ IMAD.MOV.U32 R6, RZ, RZ, RZ ; /* 0x000000ffff067224 */
/* 0x000fe200078e00ff */
/*0080*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e220000002100 */
/*0090*/ IMAD.MOV.U32 R8, RZ, RZ, RZ ; /* 0x000000ffff087224 */
/* 0x000fe400078e00ff */
/*00a0*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*00b0*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fe20003f06070 */
/*00c0*/ BSSY B0, 0x1f0 ; /* 0x0000012000007945 */
/* 0x000fe20003800000 */
/*00d0*/ IADD3 R6, P1, R6, 0x1, RZ ; /* 0x0000000106067810 */
/* 0x000fe40007f3e0ff */
/*00e0*/ ISETP.GE.U32.AND.EX P0, PT, RZ, c[0x0][0x174], PT, P0 ; /* 0x00005d00ff007a0c */
/* 0x000fc60003f06100 */
/*00f0*/ IMAD.X R8, RZ, RZ, R8, P1 ; /* 0x000000ffff087224 */
/* 0x000fe200008e0608 */
/*0100*/ ISETP.GE.U32.AND P1, PT, R6, c[0x0][0x168], PT ; /* 0x00005a0006007a0c */
/* 0x000fc80003f26070 */
/*0110*/ ISETP.GE.U32.AND.EX P1, PT, R8, c[0x0][0x16c], PT, P1 ; /* 0x00005b0008007a0c */
/* 0x000fca0003f26110 */
/*0120*/ @P0 BRA 0x1e0 ; /* 0x000000b000000947 */
/* 0x000fea0003800000 */
/*0130*/ IMAD.MOV.U32 R4, RZ, RZ, R0 ; /* 0x000000ffff047224 */
/* 0x000fe400078e0000 */
/*0140*/ IMAD.MOV.U32 R5, RZ, RZ, RZ ; /* 0x000000ffff057224 */
/* 0x000fc600078e00ff */
/*0150*/ LEA R2, P0, R4, c[0x0][0x160], 0x2 ; /* 0x0000580004027a11 */
/* 0x000fe200078010ff */
/*0160*/ IMAD R7, R9, c[0x0][0xc], RZ ; /* 0x0000030009077a24 */
/* 0x000fc600078e02ff */
/*0170*/ LEA.HI.X R3, R4, c[0x0][0x164], R5, 0x2, P0 ; /* 0x0000590004037a11 */
/* 0x000fe400000f1405 */
/*0180*/ IADD3 R4, P0, R7, R4, RZ ; /* 0x0000000407047210 */
/* 0x000fc60007f1e0ff */
/*0190*/ STG.E [R2.64], RZ ; /* 0x000000ff02007986 */
/* 0x0001e4000c101904 */
/*01a0*/ IMAD.X R5, RZ, RZ, R5, P0 ; /* 0x000000ffff057224 */
/* 0x000fe200000e0605 */
/*01b0*/ ISETP.GE.U32.AND P0, PT, R4, c[0x0][0x170], PT ; /* 0x00005c0004007a0c */
/* 0x000fc80003f06070 */
/*01c0*/ ISETP.GE.U32.AND.EX P0, PT, R5, c[0x0][0x174], PT, P0 ; /* 0x00005d0005007a0c */
/* 0x000fda0003f06100 */
/*01d0*/ @!P0 BRA 0x150 ; /* 0xffffff7000008947 */
/* 0x001fea000383ffff */
/*01e0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*01f0*/ @!P1 BRA 0xb0 ; /* 0xfffffeb000009947 */
/* 0x000fea000383ffff */
/*0200*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0210*/ BRA 0x210; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z16gpu_seqwr_kernelPimm
.globl _Z16gpu_seqwr_kernelPimm
.p2align 8
.type _Z16gpu_seqwr_kernelPimm,@function
_Z16gpu_seqwr_kernelPimm:
s_load_b64 s[2:3], s[0:1], 0x8
s_mov_b64 s[4:5], 0
s_waitcnt lgkmcnt(0)
s_cmp_eq_u64 s[2:3], 0
s_cbranch_scc1 .LBB0_6
s_clause 0x3
s_load_b32 s8, s[0:1], 0x24
s_load_b64 s[6:7], s[0:1], 0x10
s_load_b64 s[10:11], s[0:1], 0x0
s_load_b32 s1, s[0:1], 0x18
s_mov_b32 s9, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s8, s8, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[1:2], null, s15, s8, v[0:1]
v_mov_b32_e32 v2, 0
v_mov_b32_e32 v0, 0
s_mul_i32 s8, s1, s8
v_lshlrev_b64 v[3:4], 2, v[1:2]
v_cmp_gt_u64_e32 vcc_lo, s[6:7], v[1:2]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v3, s0, s10, v3
v_add_co_ci_u32_e64 v4, s0, s11, v4, s0
s_lshl_b64 s[10:11], s[8:9], 2
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_3
.p2align 6
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s12
s_add_u32 s4, s4, 1
s_addc_u32 s5, s5, 0
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_eq_u64 s[4:5], s[2:3]
s_cbranch_scc1 .LBB0_6
.LBB0_3:
s_and_saveexec_b32 s12, vcc_lo
s_cbranch_execz .LBB0_2
v_dual_mov_b32 v6, v4 :: v_dual_mov_b32 v5, v3
v_dual_mov_b32 v8, v2 :: v_dual_mov_b32 v7, v1
s_mov_b32 s13, 0
.LBB0_5:
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v7, s0, v7, s8
v_add_co_ci_u32_e64 v8, s0, s9, v8, s0
global_store_b32 v[5:6], v0, off
v_add_co_u32 v5, s1, v5, s10
v_cmp_le_u64_e64 s0, s[6:7], v[7:8]
v_add_co_ci_u32_e64 v6, s1, s11, v6, s1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_or_b32 s13, s0, s13
s_and_not1_b32 exec_lo, exec_lo, s13
s_cbranch_execnz .LBB0_5
s_branch .LBB0_2
.LBB0_6:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16gpu_seqwr_kernelPimm
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z16gpu_seqwr_kernelPimm, .Lfunc_end0-_Z16gpu_seqwr_kernelPimm
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 8
.value_kind: by_value
- .offset: 16
.size: 8
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16gpu_seqwr_kernelPimm
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z16gpu_seqwr_kernelPimm.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00017ee3_00000000-6_gpu_seqwr_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z38__device_stub__Z16gpu_seqwr_kernelPimmPimm
.type _Z38__device_stub__Z16gpu_seqwr_kernelPimmPimm, @function
_Z38__device_stub__Z16gpu_seqwr_kernelPimmPimm:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z16gpu_seqwr_kernelPimm(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z38__device_stub__Z16gpu_seqwr_kernelPimmPimm, .-_Z38__device_stub__Z16gpu_seqwr_kernelPimmPimm
.globl _Z16gpu_seqwr_kernelPimm
.type _Z16gpu_seqwr_kernelPimm, @function
_Z16gpu_seqwr_kernelPimm:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z16gpu_seqwr_kernelPimmPimm
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z16gpu_seqwr_kernelPimm, .-_Z16gpu_seqwr_kernelPimm
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z16gpu_seqwr_kernelPimm"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z16gpu_seqwr_kernelPimm(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "gpu_seqwr_kernel.hip"
.globl _Z31__device_stub__gpu_seqwr_kernelPimm # -- Begin function _Z31__device_stub__gpu_seqwr_kernelPimm
.p2align 4, 0x90
.type _Z31__device_stub__gpu_seqwr_kernelPimm,@function
_Z31__device_stub__gpu_seqwr_kernelPimm: # @_Z31__device_stub__gpu_seqwr_kernelPimm
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16gpu_seqwr_kernelPimm, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z31__device_stub__gpu_seqwr_kernelPimm, .Lfunc_end0-_Z31__device_stub__gpu_seqwr_kernelPimm
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16gpu_seqwr_kernelPimm, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z16gpu_seqwr_kernelPimm,@object # @_Z16gpu_seqwr_kernelPimm
.section .rodata,"a",@progbits
.globl _Z16gpu_seqwr_kernelPimm
.p2align 3, 0x0
_Z16gpu_seqwr_kernelPimm:
.quad _Z31__device_stub__gpu_seqwr_kernelPimm
.size _Z16gpu_seqwr_kernelPimm, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z16gpu_seqwr_kernelPimm"
.size .L__unnamed_1, 25
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__gpu_seqwr_kernelPimm
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z16gpu_seqwr_kernelPimm
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #define COALESCED_NUM 16
#define blockDimX 256
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define A(y,x) A[(y)*WIDTH_A+(x)]
#define B(y,x) B[(y)*WIDTH_B+(x)]
#define C(y,x) C[(y)*WIDTH_C+(x)]
#define WIDTH_C 2048
#define WIDTH_B 2048
#define WIDTH_A 2048
__global__ void matmul(float * A, float * B, float * C, int width, int height)
{
__shared__ float shared_0[16];
int i;
float sum;
sum=0;
for (i=0; i<width; i=(i+16))
{
int it_1;
if ((tidx<16))
{
shared_0[(tidx+0)]=A(idy, (i+tidx));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<16; it_1=(it_1+1))
{
float a;
float b;
a=shared_0[it_1];
b=B((it_1+i), idx);
sum+=(a*b);
}
__syncthreads();
}
{
C(idy, idx)=sum;
}
} | code for sm_80
Function : _Z6matmulPfS_S_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ ISETP.LT.AND P0, PT, RZ, c[0x0][0x178], PT ; /* 0x00005e00ff007a0c */
/* 0x000fe20003f01270 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd80000000a00 */
/*0030*/ @!P0 MOV R26, RZ ; /* 0x000000ff001a8202 */
/* 0x000fe20000000f00 */
/*0040*/ @!P0 BRA 0x5e0 ; /* 0x0000059000008947 */
/* 0x000fea0003800000 */
/*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e220000002100 */
/*0060*/ MOV R26, RZ ; /* 0x000000ff001a7202 */
/* 0x000fc60000000f00 */
/*0070*/ S2R R0, SR_TID.Y ; /* 0x0000000000007919 */
/* 0x000e680000002200 */
/*0080*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000ea80000002500 */
/*0090*/ S2R R4, SR_CTAID.Y ; /* 0x0000000000047919 */
/* 0x000ee20000002600 */
/*00a0*/ ISETP.GT.U32.AND P0, PT, R3, 0xf, PT ; /* 0x0000000f0300780c */
/* 0x001fe40003f04070 */
/*00b0*/ LEA R17, R0, R3.reuse, 0xb ; /* 0x0000000300117211 */
/* 0x082fe200078e58ff */
/*00c0*/ HFMA2.MMA R0, -RZ, RZ, 0, 0 ; /* 0x00000000ff007435 */
/* 0x000fe200000001ff */
/*00d0*/ LEA R2, R2, R3, 0x8 ; /* 0x0000000302027211 */
/* 0x004fc400078e40ff */
/*00e0*/ LEA R17, R4, R17, 0xb ; /* 0x0000001104117211 */
/* 0x008fe400078e58ff */
/*00f0*/ IADD3 R16, R2, 0x7800, RZ ; /* 0x0000780002107810 */
/* 0x000fca0007ffe0ff */
/*0100*/ @!P0 IADD3 R6, R17, R0, RZ ; /* 0x0000000011068210 */
/* 0x000fe40007ffe0ff */
/*0110*/ MOV R27, 0x4 ; /* 0x00000004001b7802 */
/* 0x000fca0000000f00 */
/*0120*/ @!P0 IMAD.WIDE.U32 R6, R6, R27, c[0x0][0x160] ; /* 0x0000580006068625 */
/* 0x000fcc00078e001b */
/*0130*/ @!P0 LDG.E R6, [R6.64] ; /* 0x0000000406068981 */
/* 0x000ea2000c1e1900 */
/*0140*/ IMAD.WIDE.U32 R8, R2.reuse, R27, c[0x0][0x168] ; /* 0x00005a0002087625 */
/* 0x040fe200078e001b */
/*0150*/ IADD3 R12, R2.reuse, 0x800, RZ ; /* 0x00000800020c7810 */
/* 0x040fe40007ffe0ff */
/*0160*/ IADD3 R18, R2, 0x1000, RZ ; /* 0x0000100002127810 */
/* 0x000fc60007ffe0ff */
/*0170*/ IMAD.WIDE.U32 R12, R12, R27, c[0x0][0x168] ; /* 0x00005a000c0c7625 */
/* 0x000fc800078e001b */
/*0180*/ IMAD.WIDE.U32 R18, R18, R27, c[0x0][0x168] ; /* 0x00005a0012127625 */
/* 0x000fe200078e001b */
/*0190*/ IADD3 R10, R2.reuse, 0x1800, RZ ; /* 0x00001800020a7810 */
/* 0x040fe40007ffe0ff */
/*01a0*/ IADD3 R24, R2, 0x2000, RZ ; /* 0x0000200002187810 */
/* 0x000fca0007ffe0ff */
/*01b0*/ IMAD.WIDE.U32 R24, R24, R27.reuse, c[0x0][0x168] ; /* 0x00005a0018187625 */
/* 0x080fe200078e001b */
/*01c0*/ IADD3 R20, R2, 0x2800, RZ ; /* 0x0000280002147810 */
/* 0x000fe20007ffe0ff */
/*01d0*/ @!P0 STS [R3.X4], R6 ; /* 0x0000000603008388 */
/* 0x0041e80000004800 */
/*01e0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*01f0*/ LDG.E R21, [R8.64] ; /* 0x0000000408157981 */
/* 0x0002a8000c1e1900 */
/*0200*/ LDG.E R4, [R12.64] ; /* 0x000000040c047981 */
/* 0x000728000c1e1900 */
/*0210*/ LDG.E R5, [R18.64] ; /* 0x0000000412057981 */
/* 0x000b22000c1e1900 */
/*0220*/ IMAD.WIDE.U32 R6, R10, R27, c[0x0][0x168] ; /* 0x00005a000a067625 */
/* 0x001fc600078e001b */
/*0230*/ LDG.E R15, [R24.64] ; /* 0x00000004180f7981 */
/* 0x000128000c1e1900 */
/*0240*/ LDG.E R14, [R6.64] ; /* 0x00000004060e7981 */
/* 0x000122000c1e1900 */
/*0250*/ IADD3 R22, R2, 0x3000, RZ ; /* 0x0000300002167810 */
/* 0x000fe20007ffe0ff */
/*0260*/ IMAD.WIDE.U32 R12, R20, R27.reuse, c[0x0][0x168] ; /* 0x00005a00140c7625 */
/* 0x088fe400078e001b */
/*0270*/ LDS.128 R8, [RZ] ; /* 0x00000000ff087984 */
/* 0x002ea40000000c00 */
/*0280*/ IMAD.WIDE.U32 R22, R22, R27, c[0x0][0x168] ; /* 0x00005a0016167625 */
/* 0x000fc400078e001b */
/*0290*/ LDG.E R12, [R12.64] ; /* 0x000000040c0c7981 */
/* 0x0002e8000c1e1900 */
/*02a0*/ LDG.E R13, [R22.64] ; /* 0x00000004160d7981 */
/* 0x0022e2000c1e1900 */
/*02b0*/ IADD3 R28, R2, 0x3800, RZ ; /* 0x00003800021c7810 */
/* 0x000fe40007ffe0ff */
/*02c0*/ IADD3 R18, R16, -0x3800, RZ ; /* 0xffffc80010127810 */
/* 0x020fc60007ffe0ff */
/*02d0*/ IMAD.WIDE.U32 R28, R28, R27, c[0x0][0x168] ; /* 0x00005a001c1c7625 */
/* 0x000fe200078e001b */
/*02e0*/ IADD3 R24, R16, -0x3000, RZ ; /* 0xffffd00010187810 */
/* 0x001fc60007ffe0ff */
/*02f0*/ IMAD.WIDE.U32 R6, R18, R27.reuse, c[0x0][0x168] ; /* 0x00005a0012067625 */
/* 0x080fe400078e001b */
/*0300*/ LDG.E R18, [R28.64] ; /* 0x000000041c127981 */
/* 0x000162000c1e1900 */
/*0310*/ IADD3 R22, R16, -0x2800, RZ ; /* 0xffffd80010167810 */
/* 0x002fe20007ffe0ff */
/*0320*/ IMAD.WIDE.U32 R24, R24, R27.reuse, c[0x0][0x168] ; /* 0x00005a0018187625 */
/* 0x080fe400078e001b */
/*0330*/ LDG.E R19, [R6.64] ; /* 0x0000000406137981 */
/* 0x0002e4000c1e1900 */
/*0340*/ IMAD.WIDE.U32 R22, R22, R27, c[0x0][0x168] ; /* 0x00005a0016167625 */
/* 0x000fe400078e001b */
/*0350*/ LDG.E R20, [R24.64] ; /* 0x0000000418147981 */
/* 0x0004e2000c1e1900 */
/*0360*/ IADD3 R28, R16, -0x1800, RZ ; /* 0xffffe800101c7810 */
/* 0x001fc40007ffe0ff */
/*0370*/ IADD3 R6, R16, -0x2000, RZ ; /* 0xffffe00010067810 */
/* 0x002fc60007ffe0ff */
/*0380*/ IMAD.WIDE.U32 R28, R28, R27, c[0x0][0x168] ; /* 0x00005a001c1c7625 */
/* 0x000fc800078e001b */
/*0390*/ IMAD.WIDE.U32 R6, R6, R27, c[0x0][0x168] ; /* 0x00005a0006067625 */
/* 0x000fc800078e001b */
/*03a0*/ FFMA R8, R21, R8, R26 ; /* 0x0000000815087223 */
/* 0x004fe2000000001a */
/*03b0*/ IADD3 R26, R16, -0x1000, RZ ; /* 0xfffff000101a7810 */
/* 0x000fe20007ffe0ff */
/*03c0*/ LDG.E R21, [R22.64] ; /* 0x0000000416157981 */
/* 0x0000a8000c1e1900 */
/*03d0*/ IMAD.WIDE.U32 R24, R26, R27, c[0x0][0x168] ; /* 0x00005a001a187625 */
/* 0x000fe200078e001b */
/*03e0*/ LDG.E R22, [R6.64] ; /* 0x0000000406167981 */
/* 0x0010a8000c1e1900 */
/*03f0*/ LDG.E R23, [R28.64] ; /* 0x000000041c177981 */
/* 0x0002a8000c1e1900 */
/*0400*/ LDG.E R24, [R24.64] ; /* 0x0000000418187981 */
/* 0x0008a2000c1e1900 */
/*0410*/ IADD3 R6, R16, -0x800, RZ ; /* 0xfffff80010067810 */
/* 0x001fca0007ffe0ff */
/*0420*/ IMAD.WIDE.U32 R28, R6, R27, c[0x0][0x168] ; /* 0x00005a00061c7625 */
/* 0x002fc800078e001b */
/*0430*/ IMAD.WIDE.U32 R26, R16, R27, c[0x0][0x168] ; /* 0x00005a00101a7625 */
/* 0x000fe200078e001b */
/*0440*/ LDG.E R25, [R28.64] ; /* 0x000000041c197981 */
/* 0x010f2a000c1e1900 */
/*0450*/ LDG.E R26, [R26.64] ; /* 0x000000041a1a7981 */
/* 0x000f22000c1e1900 */
/*0460*/ FFMA R4, R4, R9, R8 ; /* 0x0000000904047223 */
/* 0x000fc80000000008 */
/*0470*/ FFMA R10, R5, R10, R4 ; /* 0x0000000a050a7223 */
/* 0x000fe40000000004 */
/*0480*/ LDS.128 R4, [0x10] ; /* 0x00001000ff047984 */
/* 0x000e240000000c00 */
/*0490*/ FFMA R10, R14, R11, R10 ; /* 0x0000000b0e0a7223 */
/* 0x000fc8000000000a */
/*04a0*/ FFMA R4, R15, R4, R10 ; /* 0x000000040f047223 */
/* 0x001fe4000000000a */
/*04b0*/ LDS.128 R8, [0x20] ; /* 0x00002000ff087984 */
/* 0x000e240000000c00 */
/*04c0*/ FFMA R4, R12, R5, R4 ; /* 0x000000050c047223 */
/* 0x008fc80000000004 */
/*04d0*/ FFMA R4, R13, R6, R4 ; /* 0x000000060d047223 */
/* 0x000fe40000000004 */
/*04e0*/ LDS.128 R12, [0x30] ; /* 0x00003000ff0c7984 */
/* 0x000e640000000c00 */
/*04f0*/ FFMA R4, R18, R7, R4 ; /* 0x0000000712047223 */
/* 0x020fe20000000004 */
/*0500*/ IADD3 R0, R0, 0x10, RZ ; /* 0x0000001000007810 */
/* 0x000fe20007ffe0ff */
/*0510*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe60000010000 */
/*0520*/ ISETP.GE.AND P1, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */
/* 0x000fe20003f26270 */
/*0530*/ FFMA R4, R19, R8, R4 ; /* 0x0000000813047223 */
/* 0x001fc80000000004 */
/*0540*/ FFMA R4, R20, R9, R4 ; /* 0x0000000914047223 */
/* 0x000fe20000000004 */
/*0550*/ IADD3 R2, R2, 0x8000, RZ ; /* 0x0000800002027810 */
/* 0x000fe40007ffe0ff */
/*0560*/ IADD3 R16, R16, 0x8000, RZ ; /* 0x0000800010107810 */
/* 0x000fe20007ffe0ff */
/*0570*/ FFMA R4, R21, R10, R4 ; /* 0x0000000a15047223 */
/* 0x004fc80000000004 */
/*0580*/ FFMA R4, R22, R11, R4 ; /* 0x0000000b16047223 */
/* 0x000fc80000000004 */
/*0590*/ FFMA R4, R23, R12, R4 ; /* 0x0000000c17047223 */
/* 0x002fc80000000004 */
/*05a0*/ FFMA R4, R24, R13, R4 ; /* 0x0000000d18047223 */
/* 0x000fc80000000004 */
/*05b0*/ FFMA R4, R25, R14, R4 ; /* 0x0000000e19047223 */
/* 0x010fc80000000004 */
/*05c0*/ FFMA R26, R26, R15, R4 ; /* 0x0000000f1a1a7223 */
/* 0x000fe20000000004 */
/*05d0*/ @!P1 BRA 0x100 ; /* 0xfffffb2000009947 */
/* 0x000fea000383ffff */
/*05e0*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*05f0*/ S2UR UR6, SR_CTAID.Y ; /* 0x00000000000679c3 */
/* 0x000e620000002600 */
/*0600*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe400000001ff */
/*0610*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e280000002100 */
/*0620*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e620000002200 */
/*0630*/ LEA R3, R0, R3, 0x8 ; /* 0x0000000300037211 */
/* 0x001fc400078e40ff */
/*0640*/ IADD3 R0, R2, UR6, RZ ; /* 0x0000000602007c10 */
/* 0x002fc8000fffe0ff */
/*0650*/ LEA R0, R0, R3, 0xb ; /* 0x0000000300007211 */
/* 0x000fca00078e58ff */
/*0660*/ IMAD.WIDE.U32 R2, R0, R5, c[0x0][0x170] ; /* 0x00005c0000027625 */
/* 0x000fca00078e0005 */
/*0670*/ STG.E [R2.64], R26 ; /* 0x0000001a02007986 */
/* 0x000fe2000c101904 */
/*0680*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0690*/ BRA 0x690; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*06a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0700*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0710*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0720*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0730*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0740*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0750*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0760*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0770*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #define COALESCED_NUM 16
#define blockDimX 256
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define A(y,x) A[(y)*WIDTH_A+(x)]
#define B(y,x) B[(y)*WIDTH_B+(x)]
#define C(y,x) C[(y)*WIDTH_C+(x)]
#define WIDTH_C 2048
#define WIDTH_B 2048
#define WIDTH_A 2048
__global__ void matmul(float * A, float * B, float * C, int width, int height)
{
__shared__ float shared_0[16];
int i;
float sum;
sum=0;
for (i=0; i<width; i=(i+16))
{
int it_1;
if ((tidx<16))
{
shared_0[(tidx+0)]=A(idy, (i+tidx));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<16; it_1=(it_1+1))
{
float a;
float b;
a=shared_0[it_1];
b=B((it_1+i), idx);
sum+=(a*b);
}
__syncthreads();
}
{
C(idy, idx)=sum;
}
} | .file "tmpxft_0019ad65_00000000-6_gcompiler_matmul_THREADBLOCK_X_16__THREAD_Y_32_.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z31__device_stub__Z6matmulPfS_S_iiPfS_S_ii
.type _Z31__device_stub__Z6matmulPfS_S_iiPfS_S_ii, @function
_Z31__device_stub__Z6matmulPfS_S_iiPfS_S_ii:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6matmulPfS_S_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z31__device_stub__Z6matmulPfS_S_iiPfS_S_ii, .-_Z31__device_stub__Z6matmulPfS_S_iiPfS_S_ii
.globl _Z6matmulPfS_S_ii
.type _Z6matmulPfS_S_ii, @function
_Z6matmulPfS_S_ii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z6matmulPfS_S_iiPfS_S_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z6matmulPfS_S_ii, .-_Z6matmulPfS_S_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z6matmulPfS_S_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z6matmulPfS_S_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #define COALESCED_NUM 16
#define blockDimX 256
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define A(y,x) A[(y)*WIDTH_A+(x)]
#define B(y,x) B[(y)*WIDTH_B+(x)]
#define C(y,x) C[(y)*WIDTH_C+(x)]
#define WIDTH_C 2048
#define WIDTH_B 2048
#define WIDTH_A 2048
__global__ void matmul(float * A, float * B, float * C, int width, int height)
{
__shared__ float shared_0[16];
int i;
float sum;
sum=0;
for (i=0; i<width; i=(i+16))
{
int it_1;
if ((tidx<16))
{
shared_0[(tidx+0)]=A(idy, (i+tidx));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<16; it_1=(it_1+1))
{
float a;
float b;
a=shared_0[it_1];
b=B((it_1+i), idx);
sum+=(a*b);
}
__syncthreads();
}
{
C(idy, idx)=sum;
}
} | #include <hip/hip_runtime.h>
#define COALESCED_NUM 16
#define blockDimX 256
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define A(y,x) A[(y)*WIDTH_A+(x)]
#define B(y,x) B[(y)*WIDTH_B+(x)]
#define C(y,x) C[(y)*WIDTH_C+(x)]
#define WIDTH_C 2048
#define WIDTH_B 2048
#define WIDTH_A 2048
__global__ void matmul(float * A, float * B, float * C, int width, int height)
{
__shared__ float shared_0[16];
int i;
float sum;
sum=0;
for (i=0; i<width; i=(i+16))
{
int it_1;
if ((tidx<16))
{
shared_0[(tidx+0)]=A(idy, (i+tidx));
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<16; it_1=(it_1+1))
{
float a;
float b;
a=shared_0[it_1];
b=B((it_1+i), idx);
sum+=(a*b);
}
__syncthreads();
}
{
C(idy, idx)=sum;
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#define COALESCED_NUM 16
#define blockDimX 256
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define A(y,x) A[(y)*WIDTH_A+(x)]
#define B(y,x) B[(y)*WIDTH_B+(x)]
#define C(y,x) C[(y)*WIDTH_C+(x)]
#define WIDTH_C 2048
#define WIDTH_B 2048
#define WIDTH_A 2048
// Shared-memory-tiled matrix multiply. Each thread produces one element:
// C(idy, idx) = sum over k in [0, width) of A(idy, k) * B(k, idx), with
// the A()/B()/C() macros providing row-major indexing at pitch 2048.
//
// Per the blockDimX/blockDimY macros, a block is 256x1 threads, so idy is
// uniform within a block and one 16-float slice of A's row serves everyone.
// NOTE(review): loop assumes width is a multiple of 16 (no remainder
// handling) — verify at the call site. `height` is unused.
__global__ void matmul(float * A, float * B, float * C, int width, int height)
{
// Staging buffer for the current 16-column slice of A's row idy.
__shared__ float shared_0[16];
int i;
float sum; // per-thread accumulator for C(idy, idx)
sum=0;
// Process the reduction dimension 16 columns at a time.
for (i=0; i<width; i=(i+16))
{
int it_1;
// Threads 0..15 cooperatively load the A slice into shared memory.
if ((tidx<16))
{
shared_0[(tidx+0)]=A(idy, (i+tidx));
}
// All threads reach this barrier (the if above does not contain it);
// after it, the slice is visible block-wide.
__syncthreads();
#pragma unroll
for (it_1=0; it_1<16; it_1=(it_1+1))
{
float a;
float b;
a=shared_0[it_1]; // same address for all lanes -> conflict-free broadcast
b=B((it_1+i), idx); // row of B, contiguous in idx across the warp
sum+=(a*b);
}
// Barrier before the slice is reloaded on the next iteration.
__syncthreads();
}
{
C(idy, idx)=sum;
}
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6matmulPfS_S_ii
.globl _Z6matmulPfS_S_ii
.p2align 8
.type _Z6matmulPfS_S_ii,@function
_Z6matmulPfS_S_ii:
s_load_b32 s3, s[0:1], 0x18
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s3, 1
s_cbranch_scc1 .LBB0_5
s_load_b128 s[4:7], s[0:1], 0x0
v_dual_mov_b32 v2, 0 :: v_dual_and_b32 v5, 0x3ff, v0
v_add_lshl_u32 v6, s15, v3, 11
s_lshl_b32 s8, s14, 8
v_mov_b32_e32 v4, 0
s_delay_alu instid0(VALU_DEP_3)
v_cmp_gt_u32_e32 vcc_lo, 16, v5
v_lshlrev_b32_e32 v7, 2, v5
s_mov_b32 s9, 0
s_branch .LBB0_3
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s10
v_dual_mov_b32 v9, 0 :: v_dual_add_nc_u32 v8, s8, v5
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_dual_mov_b32 v13, v9 :: v_dual_add_nc_u32 v12, 0x800, v8
v_lshlrev_b64 v[10:11], 2, v[8:9]
v_dual_mov_b32 v15, v9 :: v_dual_add_nc_u32 v14, 0x1000, v8
v_dual_mov_b32 v17, v9 :: v_dual_add_nc_u32 v16, 0x2000, v8
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_lshlrev_b64 v[12:13], 2, v[12:13]
v_add_co_u32 v10, s2, s6, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
v_add_co_ci_u32_e64 v11, s2, s7, v11, s2
v_lshlrev_b64 v[14:15], 2, v[14:15]
v_add_co_u32 v12, s2, s6, v12
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v13, s2, s7, v13, s2
global_load_b32 v1, v[10:11], off
v_dual_mov_b32 v19, v9 :: v_dual_add_nc_u32 v18, 0x2800, v8
global_load_b32 v26, v[12:13], off
v_dual_mov_b32 v11, v9 :: v_dual_add_nc_u32 v10, 0x1800, v8
v_add_co_u32 v14, s2, s6, v14
v_lshlrev_b64 v[16:17], 2, v[16:17]
v_dual_mov_b32 v21, v9 :: v_dual_add_nc_u32 v20, 0x3000, v8
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
v_lshlrev_b64 v[10:11], 2, v[10:11]
v_add_co_ci_u32_e64 v15, s2, s7, v15, s2
v_lshlrev_b64 v[18:19], 2, v[18:19]
v_dual_mov_b32 v23, v9 :: v_dual_add_nc_u32 v22, 0x3800, v8
v_add_co_u32 v10, s2, s6, v10
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v11, s2, s7, v11, s2
v_add_co_u32 v16, s2, s6, v16
v_lshlrev_b64 v[20:21], 2, v[20:21]
v_add_co_ci_u32_e64 v17, s2, s7, v17, s2
v_add_co_u32 v18, s2, s6, v18
v_lshlrev_b64 v[22:23], 2, v[22:23]
v_add_co_ci_u32_e64 v19, s2, s7, v19, s2
v_add_co_u32 v20, s2, s6, v20
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_ci_u32_e64 v21, s2, s7, v21, s2
v_add_co_u32 v22, s2, s6, v22
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v23, s2, s7, v23, s2
s_clause 0x5
global_load_b32 v27, v[14:15], off
global_load_b32 v28, v[10:11], off
global_load_b32 v29, v[16:17], off
global_load_b32 v30, v[18:19], off
global_load_b32 v20, v[20:21], off
global_load_b32 v21, v[22:23], off
v_dual_mov_b32 v25, v9 :: v_dual_add_nc_u32 v24, 0x4000, v8
v_dual_mov_b32 v13, v9 :: v_dual_add_nc_u32 v12, 0x4800, v8
v_dual_mov_b32 v17, v9 :: v_dual_add_nc_u32 v16, 0x5800, v8
v_mov_b32_e32 v19, v9
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_lshlrev_b64 v[24:25], 2, v[24:25]
v_lshlrev_b64 v[10:11], 2, v[12:13]
v_add_nc_u32_e32 v12, 0x5000, v8
v_add_nc_u32_e32 v18, 0x6000, v8
v_lshlrev_b64 v[16:17], 2, v[16:17]
v_add_nc_u32_e32 v6, 16, v6
v_add_co_u32 v14, s2, s6, v24
v_lshlrev_b64 v[12:13], 2, v[12:13]
v_add_co_ci_u32_e64 v15, s2, s7, v25, s2
v_add_co_u32 v10, s2, s6, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_ci_u32_e64 v11, s2, s7, v11, s2
v_add_co_u32 v12, s2, s6, v12
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v13, s2, s7, v13, s2
s_clause 0x2
global_load_b32 v22, v[14:15], off
global_load_b32 v23, v[10:11], off
global_load_b32 v24, v[12:13], off
v_dual_mov_b32 v13, v9 :: v_dual_add_nc_u32 v12, 0x6800, v8
v_lshlrev_b64 v[10:11], 2, v[18:19]
v_add_co_u32 v14, s2, s6, v16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v15, s2, s7, v17, s2
v_mov_b32_e32 v17, v9
v_lshlrev_b64 v[12:13], 2, v[12:13]
v_add_nc_u32_e32 v16, 0x7000, v8
v_add_co_u32 v10, s2, s6, v10
v_add_co_ci_u32_e64 v11, s2, s7, v11, s2
v_add_nc_u32_e32 v8, 0x7800, v8
v_add_co_u32 v12, s2, s6, v12
global_load_b32 v18, v[14:15], off
v_lshlrev_b64 v[14:15], 2, v[16:17]
v_add_co_ci_u32_e64 v13, s2, s7, v13, s2
v_lshlrev_b64 v[8:9], 2, v[8:9]
s_clause 0x1
global_load_b32 v16, v[10:11], off
global_load_b32 v17, v[12:13], off
v_add_co_u32 v10, s2, s6, v14
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v11, s2, s7, v15, s2
v_add_co_u32 v8, s2, s6, v8
v_add_co_ci_u32_e64 v9, s2, s7, v9, s2
s_clause 0x1
global_load_b32 v19, v[10:11], off
global_load_b32 v25, v[8:9], off
ds_load_2addr_b32 v[8:9], v2 offset1:1
ds_load_2addr_b32 v[10:11], v2 offset0:2 offset1:3
ds_load_2addr_b32 v[12:13], v2 offset0:4 offset1:5
ds_load_2addr_b32 v[14:15], v2 offset0:6 offset1:7
s_add_i32 s9, s9, 16
s_add_i32 s8, s8, 0x8000
s_cmp_lt_i32 s9, s3
s_waitcnt vmcnt(15) lgkmcnt(3)
v_fmac_f32_e32 v4, v8, v1
s_waitcnt vmcnt(14)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v4, v9, v26
ds_load_2addr_b32 v[8:9], v2 offset0:8 offset1:9
s_waitcnt vmcnt(13) lgkmcnt(3)
v_fmac_f32_e32 v4, v10, v27
s_waitcnt vmcnt(12)
v_fmac_f32_e32 v4, v11, v28
ds_load_2addr_b32 v[10:11], v2 offset0:10 offset1:11
s_waitcnt vmcnt(11) lgkmcnt(3)
v_fmac_f32_e32 v4, v12, v29
s_waitcnt vmcnt(10)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v4, v13, v30
s_waitcnt vmcnt(9) lgkmcnt(2)
v_fmac_f32_e32 v4, v14, v20
s_waitcnt vmcnt(8)
s_delay_alu instid0(VALU_DEP_1)
v_fmac_f32_e32 v4, v15, v21
ds_load_2addr_b32 v[12:13], v2 offset0:12 offset1:13
ds_load_2addr_b32 v[14:15], v2 offset0:14 offset1:15
s_waitcnt vmcnt(0) lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_fmac_f32_e32 v4, v8, v22
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v4, v9, v23
v_fmac_f32_e32 v4, v10, v24
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v4, v11, v18
v_fmac_f32_e32 v4, v12, v16
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v4, v13, v17
v_fmac_f32_e32 v4, v14, v19
s_delay_alu instid0(VALU_DEP_1)
v_fmac_f32_e32 v4, v15, v25
s_cbranch_scc0 .LBB0_6
.LBB0_3:
s_and_saveexec_b32 s10, vcc_lo
s_cbranch_execz .LBB0_2
v_add_nc_u32_e32 v1, v5, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[8:9], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v8, s2, s4, v8
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v9, s2, s5, v9, s2
global_load_b32 v1, v[8:9], off
s_waitcnt vmcnt(0)
ds_store_b32 v7, v1
s_branch .LBB0_2
.LBB0_5:
v_mov_b32_e32 v4, 0
.LBB0_6:
s_load_b64 s[0:1], s[0:1], 0x10
v_add_lshl_u32 v2, s15, v3, 11
v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
s_lshl_b32 s2, s14, 8
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_add3_u32 v0, s2, v0, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v4, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6matmulPfS_S_ii
.amdhsa_group_segment_fixed_size 64
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 32
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 31
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6matmulPfS_S_ii, .Lfunc_end0-_Z6matmulPfS_S_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 64
.kernarg_segment_align: 8
.kernarg_segment_size: 32
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6matmulPfS_S_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z6matmulPfS_S_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 31
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#define COALESCED_NUM 16
#define blockDimX 256
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define A(y,x) A[(y)*WIDTH_A+(x)]
#define B(y,x) B[(y)*WIDTH_B+(x)]
#define C(y,x) C[(y)*WIDTH_C+(x)]
#define WIDTH_C 2048
#define WIDTH_B 2048
#define WIDTH_A 2048
// One-output-per-thread matrix multiply with a shared-memory A slice:
// C(idy, idx) = sum_{k=0}^{width-1} A(idy, k) * B(k, idx).
// A()/B()/C() macros index row-major buffers of fixed width 2048.
//
// Block shape is 256x1 (blockDimX/blockDimY macros), so idy is constant
// across the block and the cached row slice is shared by all 256 threads.
// NOTE(review): there is no remainder path, so width is presumably a
// multiple of 16; `height` is never referenced.
__global__ void matmul(float * A, float * B, float * C, int width, int height)
{
// 16 floats of row idy of A, refreshed each outer iteration.
__shared__ float shared_0[16];
int i;
float sum; // accumulates this thread's dot product
sum=0;
// Walk the k dimension in strides of 16.
for (i=0; i<width; i=(i+16))
{
int it_1;
// First 16 threads stage the next slice of A.
if ((tidx<16))
{
shared_0[(tidx+0)]=A(idy, (i+tidx));
}
// Block-wide barrier (placed after, not inside, the divergent if)
// makes the staged slice safe to read.
__syncthreads();
#pragma unroll
for (it_1=0; it_1<16; it_1=(it_1+1))
{
float a;
float b;
a=shared_0[it_1]; // uniform index -> bank-conflict-free broadcast
b=B((it_1+i), idx); // adjacent threads read adjacent B elements
sum+=(a*b);
}
// Prevent the next load from racing with readers of this slice.
__syncthreads();
}
{
C(idy, idx)=sum;
}
}
.file "gcompiler_matmul_THREADBLOCK_X_16__THREAD_Y_32_.hip"
.globl _Z21__device_stub__matmulPfS_S_ii # -- Begin function _Z21__device_stub__matmulPfS_S_ii
.p2align 4, 0x90
.type _Z21__device_stub__matmulPfS_S_ii,@function
_Z21__device_stub__matmulPfS_S_ii: # @_Z21__device_stub__matmulPfS_S_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
movq %rsp, %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6matmulPfS_S_ii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z21__device_stub__matmulPfS_S_ii, .Lfunc_end0-_Z21__device_stub__matmulPfS_S_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6matmulPfS_S_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6matmulPfS_S_ii,@object # @_Z6matmulPfS_S_ii
.section .rodata,"a",@progbits
.globl _Z6matmulPfS_S_ii
.p2align 3, 0x0
_Z6matmulPfS_S_ii:
.quad _Z21__device_stub__matmulPfS_S_ii
.size _Z6matmulPfS_S_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z6matmulPfS_S_ii"
.size .L__unnamed_1, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__matmulPfS_S_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6matmulPfS_S_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z6matmulPfS_S_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ ISETP.LT.AND P0, PT, RZ, c[0x0][0x178], PT ; /* 0x00005e00ff007a0c */
/* 0x000fe20003f01270 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd80000000a00 */
/*0030*/ @!P0 MOV R26, RZ ; /* 0x000000ff001a8202 */
/* 0x000fe20000000f00 */
/*0040*/ @!P0 BRA 0x5e0 ; /* 0x0000059000008947 */
/* 0x000fea0003800000 */
/*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e220000002100 */
/*0060*/ MOV R26, RZ ; /* 0x000000ff001a7202 */
/* 0x000fc60000000f00 */
/*0070*/ S2R R0, SR_TID.Y ; /* 0x0000000000007919 */
/* 0x000e680000002200 */
/*0080*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000ea80000002500 */
/*0090*/ S2R R4, SR_CTAID.Y ; /* 0x0000000000047919 */
/* 0x000ee20000002600 */
/*00a0*/ ISETP.GT.U32.AND P0, PT, R3, 0xf, PT ; /* 0x0000000f0300780c */
/* 0x001fe40003f04070 */
/*00b0*/ LEA R17, R0, R3.reuse, 0xb ; /* 0x0000000300117211 */
/* 0x082fe200078e58ff */
/*00c0*/ HFMA2.MMA R0, -RZ, RZ, 0, 0 ; /* 0x00000000ff007435 */
/* 0x000fe200000001ff */
/*00d0*/ LEA R2, R2, R3, 0x8 ; /* 0x0000000302027211 */
/* 0x004fc400078e40ff */
/*00e0*/ LEA R17, R4, R17, 0xb ; /* 0x0000001104117211 */
/* 0x008fe400078e58ff */
/*00f0*/ IADD3 R16, R2, 0x7800, RZ ; /* 0x0000780002107810 */
/* 0x000fca0007ffe0ff */
/*0100*/ @!P0 IADD3 R6, R17, R0, RZ ; /* 0x0000000011068210 */
/* 0x000fe40007ffe0ff */
/*0110*/ MOV R27, 0x4 ; /* 0x00000004001b7802 */
/* 0x000fca0000000f00 */
/*0120*/ @!P0 IMAD.WIDE.U32 R6, R6, R27, c[0x0][0x160] ; /* 0x0000580006068625 */
/* 0x000fcc00078e001b */
/*0130*/ @!P0 LDG.E R6, [R6.64] ; /* 0x0000000406068981 */
/* 0x000ea2000c1e1900 */
/*0140*/ IMAD.WIDE.U32 R8, R2.reuse, R27, c[0x0][0x168] ; /* 0x00005a0002087625 */
/* 0x040fe200078e001b */
/*0150*/ IADD3 R12, R2.reuse, 0x800, RZ ; /* 0x00000800020c7810 */
/* 0x040fe40007ffe0ff */
/*0160*/ IADD3 R18, R2, 0x1000, RZ ; /* 0x0000100002127810 */
/* 0x000fc60007ffe0ff */
/*0170*/ IMAD.WIDE.U32 R12, R12, R27, c[0x0][0x168] ; /* 0x00005a000c0c7625 */
/* 0x000fc800078e001b */
/*0180*/ IMAD.WIDE.U32 R18, R18, R27, c[0x0][0x168] ; /* 0x00005a0012127625 */
/* 0x000fe200078e001b */
/*0190*/ IADD3 R10, R2.reuse, 0x1800, RZ ; /* 0x00001800020a7810 */
/* 0x040fe40007ffe0ff */
/*01a0*/ IADD3 R24, R2, 0x2000, RZ ; /* 0x0000200002187810 */
/* 0x000fca0007ffe0ff */
/*01b0*/ IMAD.WIDE.U32 R24, R24, R27.reuse, c[0x0][0x168] ; /* 0x00005a0018187625 */
/* 0x080fe200078e001b */
/*01c0*/ IADD3 R20, R2, 0x2800, RZ ; /* 0x0000280002147810 */
/* 0x000fe20007ffe0ff */
/*01d0*/ @!P0 STS [R3.X4], R6 ; /* 0x0000000603008388 */
/* 0x0041e80000004800 */
/*01e0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*01f0*/ LDG.E R21, [R8.64] ; /* 0x0000000408157981 */
/* 0x0002a8000c1e1900 */
/*0200*/ LDG.E R4, [R12.64] ; /* 0x000000040c047981 */
/* 0x000728000c1e1900 */
/*0210*/ LDG.E R5, [R18.64] ; /* 0x0000000412057981 */
/* 0x000b22000c1e1900 */
/*0220*/ IMAD.WIDE.U32 R6, R10, R27, c[0x0][0x168] ; /* 0x00005a000a067625 */
/* 0x001fc600078e001b */
/*0230*/ LDG.E R15, [R24.64] ; /* 0x00000004180f7981 */
/* 0x000128000c1e1900 */
/*0240*/ LDG.E R14, [R6.64] ; /* 0x00000004060e7981 */
/* 0x000122000c1e1900 */
/*0250*/ IADD3 R22, R2, 0x3000, RZ ; /* 0x0000300002167810 */
/* 0x000fe20007ffe0ff */
/*0260*/ IMAD.WIDE.U32 R12, R20, R27.reuse, c[0x0][0x168] ; /* 0x00005a00140c7625 */
/* 0x088fe400078e001b */
/*0270*/ LDS.128 R8, [RZ] ; /* 0x00000000ff087984 */
/* 0x002ea40000000c00 */
/*0280*/ IMAD.WIDE.U32 R22, R22, R27, c[0x0][0x168] ; /* 0x00005a0016167625 */
/* 0x000fc400078e001b */
/*0290*/ LDG.E R12, [R12.64] ; /* 0x000000040c0c7981 */
/* 0x0002e8000c1e1900 */
/*02a0*/ LDG.E R13, [R22.64] ; /* 0x00000004160d7981 */
/* 0x0022e2000c1e1900 */
/*02b0*/ IADD3 R28, R2, 0x3800, RZ ; /* 0x00003800021c7810 */
/* 0x000fe40007ffe0ff */
/*02c0*/ IADD3 R18, R16, -0x3800, RZ ; /* 0xffffc80010127810 */
/* 0x020fc60007ffe0ff */
/*02d0*/ IMAD.WIDE.U32 R28, R28, R27, c[0x0][0x168] ; /* 0x00005a001c1c7625 */
/* 0x000fe200078e001b */
/*02e0*/ IADD3 R24, R16, -0x3000, RZ ; /* 0xffffd00010187810 */
/* 0x001fc60007ffe0ff */
/*02f0*/ IMAD.WIDE.U32 R6, R18, R27.reuse, c[0x0][0x168] ; /* 0x00005a0012067625 */
/* 0x080fe400078e001b */
/*0300*/ LDG.E R18, [R28.64] ; /* 0x000000041c127981 */
/* 0x000162000c1e1900 */
/*0310*/ IADD3 R22, R16, -0x2800, RZ ; /* 0xffffd80010167810 */
/* 0x002fe20007ffe0ff */
/*0320*/ IMAD.WIDE.U32 R24, R24, R27.reuse, c[0x0][0x168] ; /* 0x00005a0018187625 */
/* 0x080fe400078e001b */
/*0330*/ LDG.E R19, [R6.64] ; /* 0x0000000406137981 */
/* 0x0002e4000c1e1900 */
/*0340*/ IMAD.WIDE.U32 R22, R22, R27, c[0x0][0x168] ; /* 0x00005a0016167625 */
/* 0x000fe400078e001b */
/*0350*/ LDG.E R20, [R24.64] ; /* 0x0000000418147981 */
/* 0x0004e2000c1e1900 */
/*0360*/ IADD3 R28, R16, -0x1800, RZ ; /* 0xffffe800101c7810 */
/* 0x001fc40007ffe0ff */
/*0370*/ IADD3 R6, R16, -0x2000, RZ ; /* 0xffffe00010067810 */
/* 0x002fc60007ffe0ff */
/*0380*/ IMAD.WIDE.U32 R28, R28, R27, c[0x0][0x168] ; /* 0x00005a001c1c7625 */
/* 0x000fc800078e001b */
/*0390*/ IMAD.WIDE.U32 R6, R6, R27, c[0x0][0x168] ; /* 0x00005a0006067625 */
/* 0x000fc800078e001b */
/*03a0*/ FFMA R8, R21, R8, R26 ; /* 0x0000000815087223 */
/* 0x004fe2000000001a */
/*03b0*/ IADD3 R26, R16, -0x1000, RZ ; /* 0xfffff000101a7810 */
/* 0x000fe20007ffe0ff */
/*03c0*/ LDG.E R21, [R22.64] ; /* 0x0000000416157981 */
/* 0x0000a8000c1e1900 */
/*03d0*/ IMAD.WIDE.U32 R24, R26, R27, c[0x0][0x168] ; /* 0x00005a001a187625 */
/* 0x000fe200078e001b */
/*03e0*/ LDG.E R22, [R6.64] ; /* 0x0000000406167981 */
/* 0x0010a8000c1e1900 */
/*03f0*/ LDG.E R23, [R28.64] ; /* 0x000000041c177981 */
/* 0x0002a8000c1e1900 */
/*0400*/ LDG.E R24, [R24.64] ; /* 0x0000000418187981 */
/* 0x0008a2000c1e1900 */
/*0410*/ IADD3 R6, R16, -0x800, RZ ; /* 0xfffff80010067810 */
/* 0x001fca0007ffe0ff */
/*0420*/ IMAD.WIDE.U32 R28, R6, R27, c[0x0][0x168] ; /* 0x00005a00061c7625 */
/* 0x002fc800078e001b */
/*0430*/ IMAD.WIDE.U32 R26, R16, R27, c[0x0][0x168] ; /* 0x00005a00101a7625 */
/* 0x000fe200078e001b */
/*0440*/ LDG.E R25, [R28.64] ; /* 0x000000041c197981 */
/* 0x010f2a000c1e1900 */
/*0450*/ LDG.E R26, [R26.64] ; /* 0x000000041a1a7981 */
/* 0x000f22000c1e1900 */
/*0460*/ FFMA R4, R4, R9, R8 ; /* 0x0000000904047223 */
/* 0x000fc80000000008 */
/*0470*/ FFMA R10, R5, R10, R4 ; /* 0x0000000a050a7223 */
/* 0x000fe40000000004 */
/*0480*/ LDS.128 R4, [0x10] ; /* 0x00001000ff047984 */
/* 0x000e240000000c00 */
/*0490*/ FFMA R10, R14, R11, R10 ; /* 0x0000000b0e0a7223 */
/* 0x000fc8000000000a */
/*04a0*/ FFMA R4, R15, R4, R10 ; /* 0x000000040f047223 */
/* 0x001fe4000000000a */
/*04b0*/ LDS.128 R8, [0x20] ; /* 0x00002000ff087984 */
/* 0x000e240000000c00 */
/*04c0*/ FFMA R4, R12, R5, R4 ; /* 0x000000050c047223 */
/* 0x008fc80000000004 */
/*04d0*/ FFMA R4, R13, R6, R4 ; /* 0x000000060d047223 */
/* 0x000fe40000000004 */
/*04e0*/ LDS.128 R12, [0x30] ; /* 0x00003000ff0c7984 */
/* 0x000e640000000c00 */
/*04f0*/ FFMA R4, R18, R7, R4 ; /* 0x0000000712047223 */
/* 0x020fe20000000004 */
/*0500*/ IADD3 R0, R0, 0x10, RZ ; /* 0x0000001000007810 */
/* 0x000fe20007ffe0ff */
/*0510*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe60000010000 */
/*0520*/ ISETP.GE.AND P1, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */
/* 0x000fe20003f26270 */
/*0530*/ FFMA R4, R19, R8, R4 ; /* 0x0000000813047223 */
/* 0x001fc80000000004 */
/*0540*/ FFMA R4, R20, R9, R4 ; /* 0x0000000914047223 */
/* 0x000fe20000000004 */
/*0550*/ IADD3 R2, R2, 0x8000, RZ ; /* 0x0000800002027810 */
/* 0x000fe40007ffe0ff */
/*0560*/ IADD3 R16, R16, 0x8000, RZ ; /* 0x0000800010107810 */
/* 0x000fe20007ffe0ff */
/*0570*/ FFMA R4, R21, R10, R4 ; /* 0x0000000a15047223 */
/* 0x004fc80000000004 */
/*0580*/ FFMA R4, R22, R11, R4 ; /* 0x0000000b16047223 */
/* 0x000fc80000000004 */
/*0590*/ FFMA R4, R23, R12, R4 ; /* 0x0000000c17047223 */
/* 0x002fc80000000004 */
/*05a0*/ FFMA R4, R24, R13, R4 ; /* 0x0000000d18047223 */
/* 0x000fc80000000004 */
/*05b0*/ FFMA R4, R25, R14, R4 ; /* 0x0000000e19047223 */
/* 0x010fc80000000004 */
/*05c0*/ FFMA R26, R26, R15, R4 ; /* 0x0000000f1a1a7223 */
/* 0x000fe20000000004 */
/*05d0*/ @!P1 BRA 0x100 ; /* 0xfffffb2000009947 */
/* 0x000fea000383ffff */
/*05e0*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*05f0*/ S2UR UR6, SR_CTAID.Y ; /* 0x00000000000679c3 */
/* 0x000e620000002600 */
/*0600*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe400000001ff */
/*0610*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e280000002100 */
/*0620*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e620000002200 */
/*0630*/ LEA R3, R0, R3, 0x8 ; /* 0x0000000300037211 */
/* 0x001fc400078e40ff */
/*0640*/ IADD3 R0, R2, UR6, RZ ; /* 0x0000000602007c10 */
/* 0x002fc8000fffe0ff */
/*0650*/ LEA R0, R0, R3, 0xb ; /* 0x0000000300007211 */
/* 0x000fca00078e58ff */
/*0660*/ IMAD.WIDE.U32 R2, R0, R5, c[0x0][0x170] ; /* 0x00005c0000027625 */
/* 0x000fca00078e0005 */
/*0670*/ STG.E [R2.64], R26 ; /* 0x0000001a02007986 */
/* 0x000fe2000c101904 */
/*0680*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0690*/ BRA 0x690; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*06a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0700*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0710*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0720*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0730*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0740*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0750*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0760*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0770*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6matmulPfS_S_ii
.globl _Z6matmulPfS_S_ii
.p2align 8
.type _Z6matmulPfS_S_ii,@function
_Z6matmulPfS_S_ii:
s_load_b32 s3, s[0:1], 0x18
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s3, 1
s_cbranch_scc1 .LBB0_5
s_load_b128 s[4:7], s[0:1], 0x0
v_dual_mov_b32 v2, 0 :: v_dual_and_b32 v5, 0x3ff, v0
v_add_lshl_u32 v6, s15, v3, 11
s_lshl_b32 s8, s14, 8
v_mov_b32_e32 v4, 0
s_delay_alu instid0(VALU_DEP_3)
v_cmp_gt_u32_e32 vcc_lo, 16, v5
v_lshlrev_b32_e32 v7, 2, v5
s_mov_b32 s9, 0
s_branch .LBB0_3
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s10
v_dual_mov_b32 v9, 0 :: v_dual_add_nc_u32 v8, s8, v5
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_dual_mov_b32 v13, v9 :: v_dual_add_nc_u32 v12, 0x800, v8
v_lshlrev_b64 v[10:11], 2, v[8:9]
v_dual_mov_b32 v15, v9 :: v_dual_add_nc_u32 v14, 0x1000, v8
v_dual_mov_b32 v17, v9 :: v_dual_add_nc_u32 v16, 0x2000, v8
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_lshlrev_b64 v[12:13], 2, v[12:13]
v_add_co_u32 v10, s2, s6, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
v_add_co_ci_u32_e64 v11, s2, s7, v11, s2
v_lshlrev_b64 v[14:15], 2, v[14:15]
v_add_co_u32 v12, s2, s6, v12
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v13, s2, s7, v13, s2
global_load_b32 v1, v[10:11], off
v_dual_mov_b32 v19, v9 :: v_dual_add_nc_u32 v18, 0x2800, v8
global_load_b32 v26, v[12:13], off
v_dual_mov_b32 v11, v9 :: v_dual_add_nc_u32 v10, 0x1800, v8
v_add_co_u32 v14, s2, s6, v14
v_lshlrev_b64 v[16:17], 2, v[16:17]
v_dual_mov_b32 v21, v9 :: v_dual_add_nc_u32 v20, 0x3000, v8
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
v_lshlrev_b64 v[10:11], 2, v[10:11]
v_add_co_ci_u32_e64 v15, s2, s7, v15, s2
v_lshlrev_b64 v[18:19], 2, v[18:19]
v_dual_mov_b32 v23, v9 :: v_dual_add_nc_u32 v22, 0x3800, v8
v_add_co_u32 v10, s2, s6, v10
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v11, s2, s7, v11, s2
v_add_co_u32 v16, s2, s6, v16
v_lshlrev_b64 v[20:21], 2, v[20:21]
v_add_co_ci_u32_e64 v17, s2, s7, v17, s2
v_add_co_u32 v18, s2, s6, v18
v_lshlrev_b64 v[22:23], 2, v[22:23]
v_add_co_ci_u32_e64 v19, s2, s7, v19, s2
v_add_co_u32 v20, s2, s6, v20
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_ci_u32_e64 v21, s2, s7, v21, s2
v_add_co_u32 v22, s2, s6, v22
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v23, s2, s7, v23, s2
s_clause 0x5
global_load_b32 v27, v[14:15], off
global_load_b32 v28, v[10:11], off
global_load_b32 v29, v[16:17], off
global_load_b32 v30, v[18:19], off
global_load_b32 v20, v[20:21], off
global_load_b32 v21, v[22:23], off
v_dual_mov_b32 v25, v9 :: v_dual_add_nc_u32 v24, 0x4000, v8
v_dual_mov_b32 v13, v9 :: v_dual_add_nc_u32 v12, 0x4800, v8
v_dual_mov_b32 v17, v9 :: v_dual_add_nc_u32 v16, 0x5800, v8
v_mov_b32_e32 v19, v9
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_lshlrev_b64 v[24:25], 2, v[24:25]
v_lshlrev_b64 v[10:11], 2, v[12:13]
v_add_nc_u32_e32 v12, 0x5000, v8
v_add_nc_u32_e32 v18, 0x6000, v8
v_lshlrev_b64 v[16:17], 2, v[16:17]
v_add_nc_u32_e32 v6, 16, v6
v_add_co_u32 v14, s2, s6, v24
v_lshlrev_b64 v[12:13], 2, v[12:13]
v_add_co_ci_u32_e64 v15, s2, s7, v25, s2
v_add_co_u32 v10, s2, s6, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_ci_u32_e64 v11, s2, s7, v11, s2
v_add_co_u32 v12, s2, s6, v12
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v13, s2, s7, v13, s2
s_clause 0x2
global_load_b32 v22, v[14:15], off
global_load_b32 v23, v[10:11], off
global_load_b32 v24, v[12:13], off
v_dual_mov_b32 v13, v9 :: v_dual_add_nc_u32 v12, 0x6800, v8
v_lshlrev_b64 v[10:11], 2, v[18:19]
v_add_co_u32 v14, s2, s6, v16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v15, s2, s7, v17, s2
v_mov_b32_e32 v17, v9
v_lshlrev_b64 v[12:13], 2, v[12:13]
v_add_nc_u32_e32 v16, 0x7000, v8
v_add_co_u32 v10, s2, s6, v10
v_add_co_ci_u32_e64 v11, s2, s7, v11, s2
v_add_nc_u32_e32 v8, 0x7800, v8
v_add_co_u32 v12, s2, s6, v12
global_load_b32 v18, v[14:15], off
v_lshlrev_b64 v[14:15], 2, v[16:17]
v_add_co_ci_u32_e64 v13, s2, s7, v13, s2
v_lshlrev_b64 v[8:9], 2, v[8:9]
s_clause 0x1
global_load_b32 v16, v[10:11], off
global_load_b32 v17, v[12:13], off
v_add_co_u32 v10, s2, s6, v14
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v11, s2, s7, v15, s2
v_add_co_u32 v8, s2, s6, v8
v_add_co_ci_u32_e64 v9, s2, s7, v9, s2
s_clause 0x1
global_load_b32 v19, v[10:11], off
global_load_b32 v25, v[8:9], off
ds_load_2addr_b32 v[8:9], v2 offset1:1
ds_load_2addr_b32 v[10:11], v2 offset0:2 offset1:3
ds_load_2addr_b32 v[12:13], v2 offset0:4 offset1:5
ds_load_2addr_b32 v[14:15], v2 offset0:6 offset1:7
s_add_i32 s9, s9, 16
s_add_i32 s8, s8, 0x8000
s_cmp_lt_i32 s9, s3
s_waitcnt vmcnt(15) lgkmcnt(3)
v_fmac_f32_e32 v4, v8, v1
s_waitcnt vmcnt(14)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v4, v9, v26
ds_load_2addr_b32 v[8:9], v2 offset0:8 offset1:9
s_waitcnt vmcnt(13) lgkmcnt(3)
v_fmac_f32_e32 v4, v10, v27
s_waitcnt vmcnt(12)
v_fmac_f32_e32 v4, v11, v28
ds_load_2addr_b32 v[10:11], v2 offset0:10 offset1:11
s_waitcnt vmcnt(11) lgkmcnt(3)
v_fmac_f32_e32 v4, v12, v29
s_waitcnt vmcnt(10)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v4, v13, v30
s_waitcnt vmcnt(9) lgkmcnt(2)
v_fmac_f32_e32 v4, v14, v20
s_waitcnt vmcnt(8)
s_delay_alu instid0(VALU_DEP_1)
v_fmac_f32_e32 v4, v15, v21
ds_load_2addr_b32 v[12:13], v2 offset0:12 offset1:13
ds_load_2addr_b32 v[14:15], v2 offset0:14 offset1:15
s_waitcnt vmcnt(0) lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_fmac_f32_e32 v4, v8, v22
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v4, v9, v23
v_fmac_f32_e32 v4, v10, v24
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v4, v11, v18
v_fmac_f32_e32 v4, v12, v16
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v4, v13, v17
v_fmac_f32_e32 v4, v14, v19
s_delay_alu instid0(VALU_DEP_1)
v_fmac_f32_e32 v4, v15, v25
s_cbranch_scc0 .LBB0_6
.LBB0_3:
s_and_saveexec_b32 s10, vcc_lo
s_cbranch_execz .LBB0_2
v_add_nc_u32_e32 v1, v5, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[8:9], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v8, s2, s4, v8
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v9, s2, s5, v9, s2
global_load_b32 v1, v[8:9], off
s_waitcnt vmcnt(0)
ds_store_b32 v7, v1
s_branch .LBB0_2
.LBB0_5:
v_mov_b32_e32 v4, 0
.LBB0_6:
s_load_b64 s[0:1], s[0:1], 0x10
v_add_lshl_u32 v2, s15, v3, 11
v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
s_lshl_b32 s2, s14, 8
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_add3_u32 v0, s2, v0, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v4, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6matmulPfS_S_ii
.amdhsa_group_segment_fixed_size 64
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 32
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 31
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6matmulPfS_S_ii, .Lfunc_end0-_Z6matmulPfS_S_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 64
.kernarg_segment_align: 8
.kernarg_segment_size: 32
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6matmulPfS_S_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z6matmulPfS_S_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 31
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0019ad65_00000000-6_gcompiler_matmul_THREADBLOCK_X_16__THREAD_Y_32_.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z31__device_stub__Z6matmulPfS_S_iiPfS_S_ii
.type _Z31__device_stub__Z6matmulPfS_S_iiPfS_S_ii, @function
_Z31__device_stub__Z6matmulPfS_S_iiPfS_S_ii:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6matmulPfS_S_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z31__device_stub__Z6matmulPfS_S_iiPfS_S_ii, .-_Z31__device_stub__Z6matmulPfS_S_iiPfS_S_ii
.globl _Z6matmulPfS_S_ii
.type _Z6matmulPfS_S_ii, @function
_Z6matmulPfS_S_ii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z6matmulPfS_S_iiPfS_S_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z6matmulPfS_S_ii, .-_Z6matmulPfS_S_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z6matmulPfS_S_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z6matmulPfS_S_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "gcompiler_matmul_THREADBLOCK_X_16__THREAD_Y_32_.hip"
.globl _Z21__device_stub__matmulPfS_S_ii # -- Begin function _Z21__device_stub__matmulPfS_S_ii
.p2align 4, 0x90
.type _Z21__device_stub__matmulPfS_S_ii,@function
_Z21__device_stub__matmulPfS_S_ii: # @_Z21__device_stub__matmulPfS_S_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
movq %rsp, %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6matmulPfS_S_ii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z21__device_stub__matmulPfS_S_ii, .Lfunc_end0-_Z21__device_stub__matmulPfS_S_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6matmulPfS_S_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6matmulPfS_S_ii,@object # @_Z6matmulPfS_S_ii
.section .rodata,"a",@progbits
.globl _Z6matmulPfS_S_ii
.p2align 3, 0x0
_Z6matmulPfS_S_ii:
.quad _Z21__device_stub__matmulPfS_S_ii
.size _Z6matmulPfS_S_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z6matmulPfS_S_ii"
.size .L__unnamed_1, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__matmulPfS_S_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6matmulPfS_S_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include<iostream>
#include<cuda.h>
using namespace std;
__global__ void add(int *a,const int *b){
int i=blockIdx.x;
a[i]+=b[i];
}
int main(){
const int N=10;
int *a,*b,*temp;
temp=new int[N];
cudaMalloc(&a,N*sizeof(int));
cudaMalloc(&b,N*sizeof(int));
for(int i=0;i<N;i++)
temp[i]=i;
cudaMemcpy(a,temp,N*sizeof(int),cudaMemcpyHostToDevice);
for(int i=0;i<N;i++)
temp[i]=2*i;
cudaMemcpy(b,temp,N*sizeof(int),cudaMemcpyHostToDevice);
add<<<N,1>>>(a,b);
cudaMemcpy(temp,a,N*sizeof(int),cudaMemcpyDeviceToHost);
for(int i=0;i<N;i++)
cout<<temp[i]<<endl;
delete[] temp;
cudaFree(a);
cudaFree(b);
} | code for sm_80
Function : _Z3addPiPKi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x001fc800078e0205 */
/*0050*/ IMAD.WIDE R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fe400078e0205 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R7, [R4.64] ; /* 0x0000000404077981 */
/* 0x000ea4000c1e1900 */
/*0080*/ IADD3 R7, R2, R7, RZ ; /* 0x0000000702077210 */
/* 0x004fca0007ffe0ff */
/*0090*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*00a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00b0*/ BRA 0xb0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include<iostream>
#include<cuda.h>
using namespace std;
__global__ void add(int *a,const int *b){
int i=blockIdx.x;
a[i]+=b[i];
}
int main(){
const int N=10;
int *a,*b,*temp;
temp=new int[N];
cudaMalloc(&a,N*sizeof(int));
cudaMalloc(&b,N*sizeof(int));
for(int i=0;i<N;i++)
temp[i]=i;
cudaMemcpy(a,temp,N*sizeof(int),cudaMemcpyHostToDevice);
for(int i=0;i<N;i++)
temp[i]=2*i;
cudaMemcpy(b,temp,N*sizeof(int),cudaMemcpyHostToDevice);
add<<<N,1>>>(a,b);
cudaMemcpy(temp,a,N*sizeof(int),cudaMemcpyDeviceToHost);
for(int i=0;i<N;i++)
cout<<temp[i]<<endl;
delete[] temp;
cudaFree(a);
cudaFree(b);
} | .file "tmpxft_00152e3b_00000000-6_test.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z25__device_stub__Z3addPiPKiPiPKi
.type _Z25__device_stub__Z3addPiPKiPiPKi, @function
_Z25__device_stub__Z3addPiPKiPiPKi:
.LFB3694:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z3addPiPKi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z25__device_stub__Z3addPiPKiPiPKi, .-_Z25__device_stub__Z3addPiPKiPiPKi
.globl _Z3addPiPKi
.type _Z3addPiPKi, @function
_Z3addPiPKi:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z25__device_stub__Z3addPiPKiPiPKi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z3addPiPKi, .-_Z3addPiPKi
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $56, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movl $40, %edi
call _Znam@PLT
movq %rax, %r12
movq %rsp, %rdi
movl $40, %esi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movl $40, %esi
call cudaMalloc@PLT
movl $0, %eax
.L12:
movl %eax, (%r12,%rax,4)
addq $1, %rax
cmpq $10, %rax
jne .L12
movl $1, %ecx
movl $40, %edx
movq %r12, %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movl $0, %eax
.L13:
leal (%rax,%rax), %edx
movl %edx, (%r12,%rax,4)
addq $1, %rax
cmpq $10, %rax
jne .L13
movl $1, %ecx
movl $40, %edx
movq %r12, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $10, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L25
.L14:
movl $2, %ecx
movl $40, %edx
movq (%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movq %r12, %rbp
leaq 40(%r12), %r14
leaq _ZSt4cout(%rip), %r13
jmp .L19
.L25:
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z25__device_stub__Z3addPiPKiPiPKi
jmp .L14
.L28:
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L26
call _ZSt16__throw_bad_castv@PLT
.L26:
call __stack_chk_fail@PLT
.L17:
movq %r15, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%r15), %rax
movl $10, %esi
movq %r15, %rdi
call *48(%rax)
movl %eax, %esi
.L18:
movsbl %sil, %esi
movq %rbx, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
addq $4, %rbp
cmpq %r14, %rbp
je .L27
.L19:
movl 0(%rbp), %esi
movq %r13, %rdi
call _ZNSolsEi@PLT
movq %rax, %rbx
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %r15
testq %r15, %r15
je .L28
cmpb $0, 56(%r15)
je .L17
movzbl 67(%r15), %esi
jmp .L18
.L27:
movq %r12, %rdi
call _ZdaPv@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L29
movl $0, %eax
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L29:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z3addPiPKi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addPiPKi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include<iostream>
#include<cuda.h>
using namespace std;
__global__ void add(int *a,const int *b){
int i=blockIdx.x;
a[i]+=b[i];
}
int main(){
const int N=10;
int *a,*b,*temp;
temp=new int[N];
cudaMalloc(&a,N*sizeof(int));
cudaMalloc(&b,N*sizeof(int));
for(int i=0;i<N;i++)
temp[i]=i;
cudaMemcpy(a,temp,N*sizeof(int),cudaMemcpyHostToDevice);
for(int i=0;i<N;i++)
temp[i]=2*i;
cudaMemcpy(b,temp,N*sizeof(int),cudaMemcpyHostToDevice);
add<<<N,1>>>(a,b);
cudaMemcpy(temp,a,N*sizeof(int),cudaMemcpyDeviceToHost);
for(int i=0;i<N;i++)
cout<<temp[i]<<endl;
delete[] temp;
cudaFree(a);
cudaFree(b);
} | #include<iostream>
#include<hip/hip_runtime.h>
using namespace std;
__global__ void add(int *a,const int *b){
int i=blockIdx.x;
a[i]+=b[i];
}
int main(){
const int N=10;
int *a,*b,*temp;
temp=new int[N];
hipMalloc(&a,N*sizeof(int));
hipMalloc(&b,N*sizeof(int));
for(int i=0;i<N;i++)
temp[i]=i;
hipMemcpy(a,temp,N*sizeof(int),hipMemcpyHostToDevice);
for(int i=0;i<N;i++)
temp[i]=2*i;
hipMemcpy(b,temp,N*sizeof(int),hipMemcpyHostToDevice);
add<<<N,1>>>(a,b);
hipMemcpy(temp,a,N*sizeof(int),hipMemcpyDeviceToHost);
for(int i=0;i<N;i++)
cout<<temp[i]<<endl;
delete[] temp;
hipFree(a);
hipFree(b);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include<iostream>
#include<hip/hip_runtime.h>
using namespace std;
__global__ void add(int *a,const int *b){
int i=blockIdx.x;
a[i]+=b[i];
}
int main(){
const int N=10;
int *a,*b,*temp;
temp=new int[N];
hipMalloc(&a,N*sizeof(int));
hipMalloc(&b,N*sizeof(int));
for(int i=0;i<N;i++)
temp[i]=i;
hipMemcpy(a,temp,N*sizeof(int),hipMemcpyHostToDevice);
for(int i=0;i<N;i++)
temp[i]=2*i;
hipMemcpy(b,temp,N*sizeof(int),hipMemcpyHostToDevice);
add<<<N,1>>>(a,b);
hipMemcpy(temp,a,N*sizeof(int),hipMemcpyDeviceToHost);
for(int i=0;i<N;i++)
cout<<temp[i]<<endl;
delete[] temp;
hipFree(a);
hipFree(b);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addPiPKi
.globl _Z3addPiPKi
.p2align 8
.type _Z3addPiPKi,@function
_Z3addPiPKi:
s_load_b128 s[0:3], s[0:1], 0x0
s_mov_b32 s4, s15
s_ashr_i32 s5, s15, 31
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[4:5], s[4:5], 2
s_waitcnt lgkmcnt(0)
s_add_u32 s2, s2, s4
s_addc_u32 s3, s3, s5
s_add_u32 s0, s0, s4
s_addc_u32 s1, s1, s5
s_load_b32 s2, s[2:3], 0x0
s_load_b32 s3, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_add_i32 s2, s3, s2
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addPiPKi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 16
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 16
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addPiPKi, .Lfunc_end0-_Z3addPiPKi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 16
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addPiPKi
.private_segment_fixed_size: 0
.sgpr_count: 16
.sgpr_spill_count: 0
.symbol: _Z3addPiPKi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include<iostream>
#include<hip/hip_runtime.h>
using namespace std;
__global__ void add(int *a,const int *b){
int i=blockIdx.x;
a[i]+=b[i];
}
int main(){
const int N=10;
int *a,*b,*temp;
temp=new int[N];
hipMalloc(&a,N*sizeof(int));
hipMalloc(&b,N*sizeof(int));
for(int i=0;i<N;i++)
temp[i]=i;
hipMemcpy(a,temp,N*sizeof(int),hipMemcpyHostToDevice);
for(int i=0;i<N;i++)
temp[i]=2*i;
hipMemcpy(b,temp,N*sizeof(int),hipMemcpyHostToDevice);
add<<<N,1>>>(a,b);
hipMemcpy(temp,a,N*sizeof(int),hipMemcpyDeviceToHost);
for(int i=0;i<N;i++)
cout<<temp[i]<<endl;
delete[] temp;
hipFree(a);
hipFree(b);
} | .text
.file "test.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z18__device_stub__addPiPKi # -- Begin function _Z18__device_stub__addPiPKi
.p2align 4, 0x90
.type _Z18__device_stub__addPiPKi,@function
_Z18__device_stub__addPiPKi: # @_Z18__device_stub__addPiPKi
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z3addPiPKi, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z18__device_stub__addPiPKi, .Lfunc_end0-_Z18__device_stub__addPiPKi
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $104, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $40, %edi
callq _Znam
movq %rax, %rbx
movq %rsp, %rdi
movl $40, %esi
callq hipMalloc
leaq 8(%rsp), %rdi
movl $40, %esi
callq hipMalloc
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl %eax, (%rbx,%rax,4)
incq %rax
cmpq $10, %rax
jne .LBB1_1
# %bb.2:
movq (%rsp), %rdi
movl $40, %edx
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_3: # =>This Inner Loop Header: Depth=1
movl %eax, (%rbx,%rax,2)
addq $2, %rax
cmpq $20, %rax
jne .LBB1_3
# %bb.4:
movq 8(%rsp), %rdi
movl $40, %edx
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdx # imm = 0x100000001
leaq 9(%rdx), %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_6
# %bb.5:
movq (%rsp), %rax
movq 8(%rsp), %rcx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addPiPKi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_6:
movq (%rsp), %rsi
movl $40, %edx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %r15d, %r15d
jmp .LBB1_7
.p2align 4, 0x90
.LBB1_10: # in Loop: Header=BB1_7 Depth=1
movq %r14, %rdi
movq %rax, %r12
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r12, %rax
.LBB1_11: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
# in Loop: Header=BB1_7 Depth=1
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
incq %r15
cmpq $10, %r15
je .LBB1_12
.LBB1_7: # =>This Inner Loop Header: Depth=1
movl (%rbx,%r15,4), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r14
testq %r14, %r14
je .LBB1_13
# %bb.8: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
# in Loop: Header=BB1_7 Depth=1
cmpb $0, 56(%r14)
je .LBB1_10
# %bb.9: # in Loop: Header=BB1_7 Depth=1
movzbl 67(%r14), %ecx
jmp .LBB1_11
.LBB1_12:
movq %rbx, %rdi
callq _ZdaPv
movq (%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $104, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB1_13:
.cfi_def_cfa_offset 144
callq _ZSt16__throw_bad_castv
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3addPiPKi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3addPiPKi,@object # @_Z3addPiPKi
.section .rodata,"a",@progbits
.globl _Z3addPiPKi
.p2align 3, 0x0
_Z3addPiPKi:
.quad _Z18__device_stub__addPiPKi
.size _Z3addPiPKi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z3addPiPKi"
.size .L__unnamed_1, 12
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__addPiPKi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3addPiPKi
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z3addPiPKi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x001fc800078e0205 */
/*0050*/ IMAD.WIDE R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fe400078e0205 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R7, [R4.64] ; /* 0x0000000404077981 */
/* 0x000ea4000c1e1900 */
/*0080*/ IADD3 R7, R2, R7, RZ ; /* 0x0000000702077210 */
/* 0x004fca0007ffe0ff */
/*0090*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*00a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00b0*/ BRA 0xb0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addPiPKi
.globl _Z3addPiPKi
.p2align 8
.type _Z3addPiPKi,@function
_Z3addPiPKi:
s_load_b128 s[0:3], s[0:1], 0x0
s_mov_b32 s4, s15
s_ashr_i32 s5, s15, 31
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[4:5], s[4:5], 2
s_waitcnt lgkmcnt(0)
s_add_u32 s2, s2, s4
s_addc_u32 s3, s3, s5
s_add_u32 s0, s0, s4
s_addc_u32 s1, s1, s5
s_load_b32 s2, s[2:3], 0x0
s_load_b32 s3, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_add_i32 s2, s3, s2
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addPiPKi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 16
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 16
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addPiPKi, .Lfunc_end0-_Z3addPiPKi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 16
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addPiPKi
.private_segment_fixed_size: 0
.sgpr_count: 16
.sgpr_spill_count: 0
.symbol: _Z3addPiPKi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00152e3b_00000000-6_test.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z25__device_stub__Z3addPiPKiPiPKi
.type _Z25__device_stub__Z3addPiPKiPiPKi, @function
_Z25__device_stub__Z3addPiPKiPiPKi:
.LFB3694:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z3addPiPKi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z25__device_stub__Z3addPiPKiPiPKi, .-_Z25__device_stub__Z3addPiPKiPiPKi
.globl _Z3addPiPKi
.type _Z3addPiPKi, @function
_Z3addPiPKi:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z25__device_stub__Z3addPiPKiPiPKi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z3addPiPKi, .-_Z3addPiPKi
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $56, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movl $40, %edi
call _Znam@PLT
movq %rax, %r12
movq %rsp, %rdi
movl $40, %esi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movl $40, %esi
call cudaMalloc@PLT
movl $0, %eax
.L12:
movl %eax, (%r12,%rax,4)
addq $1, %rax
cmpq $10, %rax
jne .L12
movl $1, %ecx
movl $40, %edx
movq %r12, %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movl $0, %eax
.L13:
leal (%rax,%rax), %edx
movl %edx, (%r12,%rax,4)
addq $1, %rax
cmpq $10, %rax
jne .L13
movl $1, %ecx
movl $40, %edx
movq %r12, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $10, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L25
.L14:
movl $2, %ecx
movl $40, %edx
movq (%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movq %r12, %rbp
leaq 40(%r12), %r14
leaq _ZSt4cout(%rip), %r13
jmp .L19
.L25:
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z25__device_stub__Z3addPiPKiPiPKi
jmp .L14
.L28:
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L26
call _ZSt16__throw_bad_castv@PLT
.L26:
call __stack_chk_fail@PLT
.L17:
movq %r15, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%r15), %rax
movl $10, %esi
movq %r15, %rdi
call *48(%rax)
movl %eax, %esi
.L18:
movsbl %sil, %esi
movq %rbx, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
addq $4, %rbp
cmpq %r14, %rbp
je .L27
.L19:
movl 0(%rbp), %esi
movq %r13, %rdi
call _ZNSolsEi@PLT
movq %rax, %rbx
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %r15
testq %r15, %r15
je .L28
cmpb $0, 56(%r15)
je .L17
movzbl 67(%r15), %esi
jmp .L18
.L27:
movq %r12, %rdi
call _ZdaPv@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L29
movl $0, %eax
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L29:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z3addPiPKi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addPiPKi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "test.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z18__device_stub__addPiPKi # -- Begin function _Z18__device_stub__addPiPKi
.p2align 4, 0x90
.type _Z18__device_stub__addPiPKi,@function
_Z18__device_stub__addPiPKi: # @_Z18__device_stub__addPiPKi
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z3addPiPKi, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z18__device_stub__addPiPKi, .Lfunc_end0-_Z18__device_stub__addPiPKi
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $104, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $40, %edi
callq _Znam
movq %rax, %rbx
movq %rsp, %rdi
movl $40, %esi
callq hipMalloc
leaq 8(%rsp), %rdi
movl $40, %esi
callq hipMalloc
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl %eax, (%rbx,%rax,4)
incq %rax
cmpq $10, %rax
jne .LBB1_1
# %bb.2:
movq (%rsp), %rdi
movl $40, %edx
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_3: # =>This Inner Loop Header: Depth=1
movl %eax, (%rbx,%rax,2)
addq $2, %rax
cmpq $20, %rax
jne .LBB1_3
# %bb.4:
movq 8(%rsp), %rdi
movl $40, %edx
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdx # imm = 0x100000001
leaq 9(%rdx), %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_6
# %bb.5:
movq (%rsp), %rax
movq 8(%rsp), %rcx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addPiPKi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_6:
movq (%rsp), %rsi
movl $40, %edx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %r15d, %r15d
jmp .LBB1_7
.p2align 4, 0x90
.LBB1_10: # in Loop: Header=BB1_7 Depth=1
movq %r14, %rdi
movq %rax, %r12
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r12, %rax
.LBB1_11: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
# in Loop: Header=BB1_7 Depth=1
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
incq %r15
cmpq $10, %r15
je .LBB1_12
.LBB1_7: # =>This Inner Loop Header: Depth=1
movl (%rbx,%r15,4), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r14
testq %r14, %r14
je .LBB1_13
# %bb.8: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
# in Loop: Header=BB1_7 Depth=1
cmpb $0, 56(%r14)
je .LBB1_10
# %bb.9: # in Loop: Header=BB1_7 Depth=1
movzbl 67(%r14), %ecx
jmp .LBB1_11
.LBB1_12:
movq %rbx, %rdi
callq _ZdaPv
movq (%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $104, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB1_13:
.cfi_def_cfa_offset 144
callq _ZSt16__throw_bad_castv
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3addPiPKi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3addPiPKi,@object # @_Z3addPiPKi
.section .rodata,"a",@progbits
.globl _Z3addPiPKi
.p2align 3, 0x0
_Z3addPiPKi:
.quad _Z18__device_stub__addPiPKi
.size _Z3addPiPKi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z3addPiPKi"
.size .L__unnamed_1, 12
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__addPiPKi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3addPiPKi
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
extern "C" {
}
#define TB 256
#define EPS 0.1
#undef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#undef MAX
#define MAX(a, b) ((a) > (b) ? (a) : (b))
__global__ void avg_vote_kernel( float *A, float *B, int *corrAB, int patch, int c, int h, int w )
{
int _id = blockIdx.x * blockDim.x + threadIdx.x;
int size = h * w;
int radius = patch / 2;
if (_id < c * size) {
// _id = dc * size + id
int id = _id % size, dc = _id / size;
int x1 = id % w, y1 = id / w;
double sum = 0;
int cnt = 0;
for (int dx = -radius; dx <= radius; dx++) {
for (int dy = -radius; dy <= radius; dy++) {
int new_x1 = x1 + dx, new_y1 = y1 + dy;
if (new_x1 >= 0 && new_x1 < w && new_y1 >= 0 && new_y1 < h) {
int new_id1 = new_y1 * w + new_x1;
int x2 = corrAB[new_id1 * 2 + 0];
int y2 = corrAB[new_id1 * 2 + 1];
int new_x2 = x2 - dx, new_y2 = y2 - dy;
if (new_x2 >= 0 && new_x2 < w && new_y2 >= 0 && new_y2 < h) {
int new_id2 = new_y2 * w + new_x2;
sum += A[dc * size + new_id2];
cnt++;
}
}
}
}
if (cnt != 0)
B[dc * size + id] = sum / cnt;
}
return ;
} | .file "tmpxft_0002a7e3_00000000-6_avg_vote_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z43__device_stub__Z15avg_vote_kernelPfS_PiiiiiPfS_Piiiii
.type _Z43__device_stub__Z15avg_vote_kernelPfS_PiiiiiPfS_Piiiii, @function
_Z43__device_stub__Z15avg_vote_kernelPfS_PiiiiiPfS_Piiiii:
.LFB2051:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 192(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z15avg_vote_kernelPfS_Piiiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z43__device_stub__Z15avg_vote_kernelPfS_PiiiiiPfS_Piiiii, .-_Z43__device_stub__Z15avg_vote_kernelPfS_PiiiiiPfS_Piiiii
.globl _Z15avg_vote_kernelPfS_Piiiii
.type _Z15avg_vote_kernelPfS_Piiiii, @function
_Z15avg_vote_kernelPfS_Piiiii:
.LFB2052:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z43__device_stub__Z15avg_vote_kernelPfS_PiiiiiPfS_Piiiii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z15avg_vote_kernelPfS_Piiiii, .-_Z15avg_vote_kernelPfS_Piiiii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z15avg_vote_kernelPfS_Piiiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z15avg_vote_kernelPfS_Piiiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
extern "C" {
}
#define TB 256
#define EPS 0.1
#undef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#undef MAX
#define MAX(a, b) ((a) > (b) ? (a) : (b))
__global__ void avg_vote_kernel( float *A, float *B, int *corrAB, int patch, int c, int h, int w )
{
int _id = blockIdx.x * blockDim.x + threadIdx.x;
int size = h * w;
int radius = patch / 2;
if (_id < c * size) {
// _id = dc * size + id
int id = _id % size, dc = _id / size;
int x1 = id % w, y1 = id / w;
double sum = 0;
int cnt = 0;
for (int dx = -radius; dx <= radius; dx++) {
for (int dy = -radius; dy <= radius; dy++) {
int new_x1 = x1 + dx, new_y1 = y1 + dy;
if (new_x1 >= 0 && new_x1 < w && new_y1 >= 0 && new_y1 < h) {
int new_id1 = new_y1 * w + new_x1;
int x2 = corrAB[new_id1 * 2 + 0];
int y2 = corrAB[new_id1 * 2 + 1];
int new_x2 = x2 - dx, new_y2 = y2 - dy;
if (new_x2 >= 0 && new_x2 < w && new_y2 >= 0 && new_y2 < h) {
int new_id2 = new_y2 * w + new_x2;
sum += A[dc * size + new_id2];
cnt++;
}
}
}
}
if (cnt != 0)
B[dc * size + id] = sum / cnt;
}
return ;
} | #include <hip/hip_runtime.h>
#include "includes.h"
extern "C" {
}
#define TB 256
#define EPS 0.1
#undef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#undef MAX
#define MAX(a, b) ((a) > (b) ? (a) : (b))
__global__ void avg_vote_kernel( float *A, float *B, int *corrAB, int patch, int c, int h, int w )
{
int _id = blockIdx.x * blockDim.x + threadIdx.x;
int size = h * w;
int radius = patch / 2;
if (_id < c * size) {
// _id = dc * size + id
int id = _id % size, dc = _id / size;
int x1 = id % w, y1 = id / w;
double sum = 0;
int cnt = 0;
for (int dx = -radius; dx <= radius; dx++) {
for (int dy = -radius; dy <= radius; dy++) {
int new_x1 = x1 + dx, new_y1 = y1 + dy;
if (new_x1 >= 0 && new_x1 < w && new_y1 >= 0 && new_y1 < h) {
int new_id1 = new_y1 * w + new_x1;
int x2 = corrAB[new_id1 * 2 + 0];
int y2 = corrAB[new_id1 * 2 + 1];
int new_x2 = x2 - dx, new_y2 = y2 - dy;
if (new_x2 >= 0 && new_x2 < w && new_y2 >= 0 && new_y2 < h) {
int new_id2 = new_y2 * w + new_x2;
sum += A[dc * size + new_id2];
cnt++;
}
}
}
}
if (cnt != 0)
B[dc * size + id] = sum / cnt;
}
return ;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
extern "C" {
}
#define TB 256
#define EPS 0.1
#undef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#undef MAX
#define MAX(a, b) ((a) > (b) ? (a) : (b))
__global__ void avg_vote_kernel( float *A, float *B, int *corrAB, int patch, int c, int h, int w )
{
int _id = blockIdx.x * blockDim.x + threadIdx.x;
int size = h * w;
int radius = patch / 2;
if (_id < c * size) {
// _id = dc * size + id
int id = _id % size, dc = _id / size;
int x1 = id % w, y1 = id / w;
double sum = 0;
int cnt = 0;
for (int dx = -radius; dx <= radius; dx++) {
for (int dy = -radius; dy <= radius; dy++) {
int new_x1 = x1 + dx, new_y1 = y1 + dy;
if (new_x1 >= 0 && new_x1 < w && new_y1 >= 0 && new_y1 < h) {
int new_id1 = new_y1 * w + new_x1;
int x2 = corrAB[new_id1 * 2 + 0];
int y2 = corrAB[new_id1 * 2 + 1];
int new_x2 = x2 - dx, new_y2 = y2 - dy;
if (new_x2 >= 0 && new_x2 < w && new_y2 >= 0 && new_y2 < h) {
int new_id2 = new_y2 * w + new_x2;
sum += A[dc * size + new_id2];
cnt++;
}
}
}
}
if (cnt != 0)
B[dc * size + id] = sum / cnt;
}
return ;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z15avg_vote_kernelPfS_Piiiii
.globl _Z15avg_vote_kernelPfS_Piiiii
.p2align 8
.type _Z15avg_vote_kernelPfS_Piiiii,@function
_Z15avg_vote_kernelPfS_Piiiii:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x34
s_load_b64 s[6:7], s[0:1], 0x1c
s_load_b32 s13, s[0:1], 0x24
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_mad_u64_u32 v[3:4], null, s15, s2, v[0:1]
s_mul_i32 s12, s13, s7
s_mul_i32 s2, s12, s6
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_cmp_gt_i32_e32 vcc_lo, s2, v3
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_17
s_ashr_i32 s2, s12, 31
v_ashrrev_i32_e32 v2, 31, v3
s_add_i32 s3, s12, s2
v_mov_b32_e32 v7, 0
s_xor_b32 s3, s3, s2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_cvt_f32_u32_e32 v0, s3
s_sub_i32 s4, 0, s3
v_add_nc_u32_e32 v4, v3, v2
v_rcp_iflag_f32_e32 v0, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_xor_b32_e32 v4, v4, v2
v_xor_b32_e32 v2, s2, v2
s_load_b32 s2, s[0:1], 0x18
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v0, 0x4f7ffffe, v0
v_cvt_u32_f32_e32 v0, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mul_lo_u32 v1, s4, v0
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s2, -1
v_mul_hi_u32 v1, v0, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v0, v0, v1
v_mul_hi_u32 v0, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v1, v0, s3
v_sub_nc_u32_e32 v1, v4, v1
v_add_nc_u32_e32 v4, 1, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v5, s3, v1
v_cmp_le_u32_e32 vcc_lo, s3, v1
v_dual_cndmask_b32 v0, v0, v4 :: v_dual_cndmask_b32 v1, v1, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v4, 1, v0
v_cmp_le_u32_e32 vcc_lo, s3, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v0, v0, v4, vcc_lo
v_xor_b32_e32 v0, v0, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_sub_nc_u32_e32 v5, v0, v2
v_mov_b32_e32 v1, 0
v_mov_b32_e32 v2, 0
v_mul_lo_u32 v6, v5, s12
s_delay_alu instid0(VALU_DEP_1)
v_sub_nc_u32_e32 v0, v3, v6
s_cbranch_scc1 .LBB0_15
s_ashr_i32 s3, s13, 31
s_delay_alu instid0(VALU_DEP_1)
v_ashrrev_i32_e32 v4, 31, v0
s_add_i32 s4, s13, s3
s_clause 0x1
s_load_b64 s[8:9], s[0:1], 0x0
s_load_b64 s[10:11], s[0:1], 0x10
s_xor_b32 s4, s4, s3
s_lshl_b32 s16, s13, 1
v_cvt_f32_u32_e32 v1, s4
s_sub_i32 s5, 0, s4
v_add_nc_u32_e32 v7, v0, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v1, v1
v_xor_b32_e32 v7, v7, v4
v_xor_b32_e32 v4, s3, v4
s_not_b32 s3, s13
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v1, 0x4f7ffffe, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_u32_f32_e32 v1, v1
v_mul_lo_u32 v2, s5, v1
s_lshr_b32 s5, s2, 31
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s2, s2, s5
s_ashr_i32 s6, s2, 1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_abs_i32 s14, s6
v_mul_hi_u32 v2, v1, v2
s_sub_i32 s17, 0, s6
s_not_b32 s15, s14
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v8, v1, v2
v_mad_u64_u32 v[1:2], null, v7, v8, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v1, v2, s4
v_sub_nc_u32_e32 v1, v7, v1
v_add_nc_u32_e32 v7, 1, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v8, s4, v1
v_cmp_le_u32_e32 vcc_lo, s4, v1
v_dual_cndmask_b32 v2, v2, v7 :: v_dual_cndmask_b32 v1, v1, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v7, 1, v2
v_cmp_le_u32_e32 vcc_lo, s4, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v1, v2, v7, vcc_lo
v_xor_b32_e32 v7, v1, v4
v_mad_u64_u32 v[1:2], null, s6, s3, v[3:4]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_nc_u32_e32 v3, v7, v4
v_sub_nc_u32_e32 v7, v1, v6
s_delay_alu instid0(VALU_DEP_2)
v_mul_lo_u32 v4, v3, s13
v_mov_b32_e32 v1, 0
v_mov_b32_e32 v2, 0
v_subrev_nc_u32_e32 v9, s6, v3
v_lshl_or_b32 v8, v7, 1, 1
v_mov_b32_e32 v7, 0
v_sub_nc_u32_e32 v10, v0, v4
.LBB0_3:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_dual_mov_b32 v3, v8 :: v_dual_add_nc_u32 v4, s17, v10
v_mov_b32_e32 v11, v9
s_mov_b32 s18, s6
v_cmp_lt_i32_e32 vcc_lo, -1, v4
v_cmp_gt_i32_e64 s2, s13, v4
s_branch .LBB0_8
.LBB0_4:
s_or_b32 exec_lo, exec_lo, s4
.LBB0_5:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s21
.LBB0_6:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s20
.LBB0_7:
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s19
v_add_nc_u32_e32 v11, 1, v11
v_add_nc_u32_e32 v3, s16, v3
s_add_i32 s18, s18, -1
s_cmp_eq_u32 s15, s18
s_cbranch_scc1 .LBB0_13
.LBB0_8:
s_and_saveexec_b32 s19, vcc_lo
s_cbranch_execz .LBB0_7
v_cmp_lt_i32_e64 s3, -1, v11
v_cmp_gt_i32_e64 s4, s7, v11
s_delay_alu instid0(VALU_DEP_2)
s_and_b32 s3, s2, s3
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
s_and_b32 s3, s3, s4
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s20, s3
s_cbranch_execz .LBB0_6
v_add_nc_u32_e32 v12, -1, v3
s_mov_b32 s21, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v13, 31, v12
v_lshlrev_b64 v[12:13], 2, v[12:13]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v12, s3, s10, v12
v_add_co_ci_u32_e64 v13, s3, s11, v13, s3
global_load_b32 v4, v[12:13], off
s_waitcnt vmcnt(0)
v_subrev_nc_u32_e32 v12, s17, v4
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_lt_i32_e32 -1, v12
s_cbranch_execz .LBB0_5
v_ashrrev_i32_e32 v4, 31, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[13:14], 2, v[3:4]
v_add_co_u32 v13, s3, s10, v13
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v14, s3, s11, v14, s3
v_cmp_gt_i32_e64 s3, s13, v12
global_load_b32 v4, v[13:14], off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v4, s18, v4
v_cmp_gt_i32_e64 s4, s7, v4
v_cmp_lt_i32_e64 s5, -1, v4
s_delay_alu instid0(VALU_DEP_2)
s_and_b32 s3, s3, s4
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
s_and_b32 s3, s3, s5
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s4, s3
s_cbranch_execz .LBB0_4
v_mul_lo_u32 v4, v4, s13
v_add_nc_u32_e32 v7, 1, v7
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add3_u32 v12, v12, v6, v4
v_ashrrev_i32_e32 v13, 31, v12
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[12:13], 2, v[12:13]
v_add_co_u32 v12, s3, s8, v12
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v13, s3, s9, v13, s3
global_load_b32 v4, v[12:13], off
s_waitcnt vmcnt(0)
v_cvt_f64_f32_e32 v[12:13], v4
v_add_f64 v[1:2], v[1:2], v[12:13]
s_branch .LBB0_4
.LBB0_13:
v_add_nc_u32_e32 v8, 2, v8
s_add_i32 s2, s17, 1
s_cmp_eq_u32 s17, s14
s_cbranch_scc1 .LBB0_15
s_mov_b32 s17, s2
s_branch .LBB0_3
.LBB0_15:
v_cmp_ne_u32_e32 vcc_lo, 0, v7
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_17
v_cvt_f64_i32_e32 v[3:4], v7
s_load_b64 s[0:1], s[0:1], 0x8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_scale_f64 v[6:7], null, v[3:4], v[3:4], v[1:2]
v_rcp_f64_e32 v[8:9], v[6:7]
s_waitcnt_depctr 0xfff
v_fma_f64 v[10:11], -v[6:7], v[8:9], 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[8:9], v[8:9], v[10:11], v[8:9]
v_fma_f64 v[10:11], -v[6:7], v[8:9], 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_fma_f64 v[8:9], v[8:9], v[10:11], v[8:9]
v_div_scale_f64 v[10:11], vcc_lo, v[1:2], v[3:4], v[1:2]
v_mul_f64 v[12:13], v[10:11], v[8:9]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[6:7], -v[6:7], v[12:13], v[10:11]
v_div_fmas_f64 v[6:7], v[6:7], v[8:9], v[12:13]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fixup_f64 v[1:2], v[6:7], v[3:4], v[1:2]
v_cvt_f32_f64_e32 v3, v[1:2]
v_mad_u64_u32 v[1:2], null, v5, s12, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v2, 31, v1
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v3, off
.LBB0_17:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z15avg_vote_kernelPfS_Piiiii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 15
.amdhsa_next_free_sgpr 22
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z15avg_vote_kernelPfS_Piiiii, .Lfunc_end0-_Z15avg_vote_kernelPfS_Piiiii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z15avg_vote_kernelPfS_Piiiii
.private_segment_fixed_size: 0
.sgpr_count: 24
.sgpr_spill_count: 0
.symbol: _Z15avg_vote_kernelPfS_Piiiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 15
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
extern "C" {
}
#define TB 256
#define EPS 0.1
#undef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#undef MAX
#define MAX(a, b) ((a) > (b) ? (a) : (b))
__global__ void avg_vote_kernel( float *A, float *B, int *corrAB, int patch, int c, int h, int w )
{
int _id = blockIdx.x * blockDim.x + threadIdx.x;
int size = h * w;
int radius = patch / 2;
if (_id < c * size) {
// _id = dc * size + id
int id = _id % size, dc = _id / size;
int x1 = id % w, y1 = id / w;
double sum = 0;
int cnt = 0;
for (int dx = -radius; dx <= radius; dx++) {
for (int dy = -radius; dy <= radius; dy++) {
int new_x1 = x1 + dx, new_y1 = y1 + dy;
if (new_x1 >= 0 && new_x1 < w && new_y1 >= 0 && new_y1 < h) {
int new_id1 = new_y1 * w + new_x1;
int x2 = corrAB[new_id1 * 2 + 0];
int y2 = corrAB[new_id1 * 2 + 1];
int new_x2 = x2 - dx, new_y2 = y2 - dy;
if (new_x2 >= 0 && new_x2 < w && new_y2 >= 0 && new_y2 < h) {
int new_id2 = new_y2 * w + new_x2;
sum += A[dc * size + new_id2];
cnt++;
}
}
}
}
if (cnt != 0)
B[dc * size + id] = sum / cnt;
}
return ;
} | .text
.file "avg_vote_kernel.hip"
.globl _Z30__device_stub__avg_vote_kernelPfS_Piiiii # -- Begin function _Z30__device_stub__avg_vote_kernelPfS_Piiiii
.p2align 4, 0x90
.type _Z30__device_stub__avg_vote_kernelPfS_Piiiii,@function
_Z30__device_stub__avg_vote_kernelPfS_Piiiii: # @_Z30__device_stub__avg_vote_kernelPfS_Piiiii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z15avg_vote_kernelPfS_Piiiii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z30__device_stub__avg_vote_kernelPfS_Piiiii, .Lfunc_end0-_Z30__device_stub__avg_vote_kernelPfS_Piiiii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z15avg_vote_kernelPfS_Piiiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z15avg_vote_kernelPfS_Piiiii,@object # @_Z15avg_vote_kernelPfS_Piiiii
.section .rodata,"a",@progbits
.globl _Z15avg_vote_kernelPfS_Piiiii
.p2align 3, 0x0
_Z15avg_vote_kernelPfS_Piiiii:
.quad _Z30__device_stub__avg_vote_kernelPfS_Piiiii
.size _Z15avg_vote_kernelPfS_Piiiii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z15avg_vote_kernelPfS_Piiiii"
.size .L__unnamed_1, 30
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z30__device_stub__avg_vote_kernelPfS_Piiiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z15avg_vote_kernelPfS_Piiiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0002a7e3_00000000-6_avg_vote_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z43__device_stub__Z15avg_vote_kernelPfS_PiiiiiPfS_Piiiii
.type _Z43__device_stub__Z15avg_vote_kernelPfS_PiiiiiPfS_Piiiii, @function
_Z43__device_stub__Z15avg_vote_kernelPfS_PiiiiiPfS_Piiiii:
.LFB2051:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 192(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z15avg_vote_kernelPfS_Piiiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z43__device_stub__Z15avg_vote_kernelPfS_PiiiiiPfS_Piiiii, .-_Z43__device_stub__Z15avg_vote_kernelPfS_PiiiiiPfS_Piiiii
.globl _Z15avg_vote_kernelPfS_Piiiii
.type _Z15avg_vote_kernelPfS_Piiiii, @function
_Z15avg_vote_kernelPfS_Piiiii:
.LFB2052:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z43__device_stub__Z15avg_vote_kernelPfS_PiiiiiPfS_Piiiii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z15avg_vote_kernelPfS_Piiiii, .-_Z15avg_vote_kernelPfS_Piiiii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z15avg_vote_kernelPfS_Piiiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z15avg_vote_kernelPfS_Piiiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "avg_vote_kernel.hip"
.globl _Z30__device_stub__avg_vote_kernelPfS_Piiiii # -- Begin function _Z30__device_stub__avg_vote_kernelPfS_Piiiii
.p2align 4, 0x90
.type _Z30__device_stub__avg_vote_kernelPfS_Piiiii,@function
_Z30__device_stub__avg_vote_kernelPfS_Piiiii: # @_Z30__device_stub__avg_vote_kernelPfS_Piiiii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z15avg_vote_kernelPfS_Piiiii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z30__device_stub__avg_vote_kernelPfS_Piiiii, .Lfunc_end0-_Z30__device_stub__avg_vote_kernelPfS_Piiiii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z15avg_vote_kernelPfS_Piiiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z15avg_vote_kernelPfS_Piiiii,@object # @_Z15avg_vote_kernelPfS_Piiiii
.section .rodata,"a",@progbits
.globl _Z15avg_vote_kernelPfS_Piiiii
.p2align 3, 0x0
_Z15avg_vote_kernelPfS_Piiiii:
.quad _Z30__device_stub__avg_vote_kernelPfS_Piiiii
.size _Z15avg_vote_kernelPfS_Piiiii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z15avg_vote_kernelPfS_Piiiii"
.size .L__unnamed_1, 30
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z30__device_stub__avg_vote_kernelPfS_Piiiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z15avg_vote_kernelPfS_Piiiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Jiang Yufan (email: jiangyufan2018@outlook.com) 2019-03-20
*/
#include "DropoutWithIndex.cuh"
#include "../XDevice.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
__global__
/*
This is a special implementation of "dropout" to reduce memory with maskIndex.
>> tData - the data pointer of the target tensor
>> sIndex - mask index
>> size - the size of the sIndex
*/
void KernelDropoutWithIndex1D(DTYPE * tData, int * sIndex, int size)
{
/* block id */
int i = blockDim.x * blockIdx.x + threadIdx.x;
DTYPE * t = tData;
if (i < size) {
int id = sIndex[i];
t[id] = DTYPE(0.0F);
}
}
/*
This is a special implementation of "dropout" to reduce memory with maskIndex.
>> x - input tensor
>> maskIndex - mask index tensor
>> c - output tensor
*/
void _CudaDropoutWithIndex(const XTensor * x, XTensor * maskIndex, XTensor * c)
{
int devID = c->devID;
int blockNum = maskIndex->unitNum;
int cudaGrids[3];
int cudaBlocks[3];
int devIDBackup;
ProtectCudaDev(devID, devIDBackup);
GDevs.GetCudaThread(devID, blockNum, cudaGrids, cudaBlocks);
dim3 blocks(cudaGrids[0]);
dim3 threads(cudaBlocks[0]);
DTYPE * tData = (DTYPE*)c->data;
int * sIndex = NULL;
sIndex = (int *)maskIndex->data;
KernelDropoutWithIndex1D <<<blocks, threads >>>(tData, sIndex, blockNum);
BacktoCudaDev(devID, devIDBackup);
}
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor) | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Jiang Yufan (email: jiangyufan2018@outlook.com) 2019-03-20
*/
#include "DropoutWithIndex.cuh"
#include "../XDevice.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
__global__
/*
This is a special implementation of "dropout" to reduce memory with maskIndex.
>> tData - the data pointer of the target tensor
>> sIndex - mask index
>> size - the size of the sIndex
*/
void KernelDropoutWithIndex1D(DTYPE * tData, int * sIndex, int size)
{
/* block id */
int i = blockDim.x * blockIdx.x + threadIdx.x;
DTYPE * t = tData;
if (i < size) {
int id = sIndex[i];
t[id] = DTYPE(0.0F);
}
}
/*
This is a special implementation of "dropout" to reduce memory with maskIndex.
>> x - input tensor
>> maskIndex - mask index tensor
>> c - output tensor
*/
void _CudaDropoutWithIndex(const XTensor * x, XTensor * maskIndex, XTensor * c)
{
int devID = c->devID;
int blockNum = maskIndex->unitNum;
int cudaGrids[3];
int cudaBlocks[3];
int devIDBackup;
ProtectCudaDev(devID, devIDBackup);
GDevs.GetCudaThread(devID, blockNum, cudaGrids, cudaBlocks);
dim3 blocks(cudaGrids[0]);
dim3 threads(cudaBlocks[0]);
DTYPE * tData = (DTYPE*)c->data;
int * sIndex = NULL;
sIndex = (int *)maskIndex->data;
KernelDropoutWithIndex1D <<<blocks, threads >>>(tData, sIndex, blockNum);
BacktoCudaDev(devID, devIDBackup);
}
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor) | .file "tmpxft_00030f30_00000000-6_DropoutWithIndex.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3094:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3094:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3117:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3117:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Jiang Yufan (email: jiangyufan2018@outlook.com) 2019-03-20
*/
#include "DropoutWithIndex.cuh"
#include "../XDevice.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
__global__
/*
This is a special implementation of "dropout" to reduce memory with maskIndex.
>> tData - the data pointer of the target tensor
>> sIndex - mask index
>> size - the size of the sIndex
*/
void KernelDropoutWithIndex1D(DTYPE * tData, int * sIndex, int size)
{
/* block id */
int i = blockDim.x * blockIdx.x + threadIdx.x;
DTYPE * t = tData;
if (i < size) {
int id = sIndex[i];
t[id] = DTYPE(0.0F);
}
}
/*
This is a special implementation of "dropout" to reduce memory with maskIndex.
>> x - input tensor
>> maskIndex - mask index tensor
>> c - output tensor
*/
void _CudaDropoutWithIndex(const XTensor * x, XTensor * maskIndex, XTensor * c)
{
int devID = c->devID;
int blockNum = maskIndex->unitNum;
int cudaGrids[3];
int cudaBlocks[3];
int devIDBackup;
ProtectCudaDev(devID, devIDBackup);
GDevs.GetCudaThread(devID, blockNum, cudaGrids, cudaBlocks);
dim3 blocks(cudaGrids[0]);
dim3 threads(cudaBlocks[0]);
DTYPE * tData = (DTYPE*)c->data;
int * sIndex = NULL;
sIndex = (int *)maskIndex->data;
KernelDropoutWithIndex1D <<<blocks, threads >>>(tData, sIndex, blockNum);
BacktoCudaDev(devID, devIDBackup);
}
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor) | /* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Jiang Yufan (email: jiangyufan2018@outlook.com) 2019-03-20
*/
#ifndef __DROPOUTWITHINDEX_CUH__
#define __DROPOUTWITHINDEX_CUH__
#include "../XTensor.h"
#include "DropoutWithIndex.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/* dropout with index (cuda version) */
void _CudaDropoutWithIndex(const XTensor * x, XTensor * maskIndex, XTensor * c);
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor)
#endif // __DROPOUTWITHINDEX_CUH__ |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Jiang Yufan (email: jiangyufan2018@outlook.com) 2019-03-20
*/
#ifndef __DROPOUTWITHINDEX_CUH__
#define __DROPOUTWITHINDEX_CUH__
#include "../XTensor.h"
#include "DropoutWithIndex.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/* dropout with index (cuda version) */
void _CudaDropoutWithIndex(const XTensor * x, XTensor * maskIndex, XTensor * c);
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor)
#endif // __DROPOUTWITHINDEX_CUH__ | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Jiang Yufan (email: jiangyufan2018@outlook.com) 2019-03-20
*/
#ifndef __DROPOUTWITHINDEX_CUH__
#define __DROPOUTWITHINDEX_CUH__
#include "../XTensor.h"
#include "DropoutWithIndex.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/* dropout with index (cuda version) */
void _CudaDropoutWithIndex(const XTensor * x, XTensor * maskIndex, XTensor * c);
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor)
#endif // __DROPOUTWITHINDEX_CUH__ | .text
.file "DropoutWithIndex.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00030f30_00000000-6_DropoutWithIndex.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3094:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3094:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3117:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3117:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "DropoutWithIndex.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#define N 500
__global__ void VecAdd(int* DA, int* DB, int* DC)
{
int i = threadIdx.x;
DC[i] = DA[i] + DB[i];
}
int main()
{ int HA[N], HB[N], HC[N];
int *DA, *DB, *DC;
int i; int size = N*sizeof(int);
// reservamos espacio en la memoria global del device
cudaMalloc((void**)&DA, size);
cudaMalloc((void**)&DB, size);
cudaMalloc((void**)&DC, size);
// inicializamos HA y HB
for (i=0; i<N; i++) {HA[i]=-i; HB[i] = 3*i;}
// copiamos HA y HB del host a DA y DB en el device, respectivamente
cudaMemcpy(DA, HA, size, cudaMemcpyHostToDevice);
cudaMemcpy(DB, HB, size, cudaMemcpyHostToDevice);
// llamamos al kernel (1 bloque de N hilos)
VecAdd <<<1, N>>>(DA, DB, DC); // N hilos ejecutan el kernel en paralelo
// copiamos el resultado, que está en la memoria global del device, (DC) al host (a HC)
cudaMemcpy(HC, DC, size, cudaMemcpyDeviceToHost);
// liberamos la memoria reservada en el device
cudaFree(DA); cudaFree(DB); cudaFree(DC);
// una vez que tenemos los resultados en el host, comprobamos que son correctos
// esta comprobación debe quitarse una vez que el programa es correcto (p. ej., para medir el tiempo de ejecución)
for (i = 0; i < N; i++) // printf("%d + %d = %d\n",HA[i],HB[i],HC[i]);
if (HC[i]!= (HA[i]+HB[i]))
{printf("error en componente %d\n", i); break;}
return 0;
} | code for sm_80
Function : _Z6VecAddPiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0207 */
/*0050*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0207 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*0090*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00a0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#define N 500
__global__ void VecAdd(int* DA, int* DB, int* DC)
{
int i = threadIdx.x;
DC[i] = DA[i] + DB[i];
}
int main()
{ int HA[N], HB[N], HC[N];
int *DA, *DB, *DC;
int i; int size = N*sizeof(int);
// reservamos espacio en la memoria global del device
cudaMalloc((void**)&DA, size);
cudaMalloc((void**)&DB, size);
cudaMalloc((void**)&DC, size);
// inicializamos HA y HB
for (i=0; i<N; i++) {HA[i]=-i; HB[i] = 3*i;}
// copiamos HA y HB del host a DA y DB en el device, respectivamente
cudaMemcpy(DA, HA, size, cudaMemcpyHostToDevice);
cudaMemcpy(DB, HB, size, cudaMemcpyHostToDevice);
// llamamos al kernel (1 bloque de N hilos)
VecAdd <<<1, N>>>(DA, DB, DC); // N hilos ejecutan el kernel en paralelo
// copiamos el resultado, que está en la memoria global del device, (DC) al host (a HC)
cudaMemcpy(HC, DC, size, cudaMemcpyDeviceToHost);
// liberamos la memoria reservada en el device
cudaFree(DA); cudaFree(DB); cudaFree(DC);
// una vez que tenemos los resultados en el host, comprobamos que son correctos
// esta comprobación debe quitarse una vez que el programa es correcto (p. ej., para medir el tiempo de ejecución)
for (i = 0; i < N; i++) // printf("%d + %d = %d\n",HA[i],HB[i],HC[i]);
if (HC[i]!= (HA[i]+HB[i]))
{printf("error en componente %d\n", i); break;}
return 0;
} | .file "tmpxft_00131469_00000000-6_suma-vectores1.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z29__device_stub__Z6VecAddPiS_S_PiS_S_
.type _Z29__device_stub__Z6VecAddPiS_S_PiS_S_, @function
_Z29__device_stub__Z6VecAddPiS_S_PiS_S_:
.LFB2082:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6VecAddPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z29__device_stub__Z6VecAddPiS_S_PiS_S_, .-_Z29__device_stub__Z6VecAddPiS_S_PiS_S_
.globl _Z6VecAddPiS_S_
.type _Z6VecAddPiS_S_, @function
_Z6VecAddPiS_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z6VecAddPiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z6VecAddPiS_S_, .-_Z6VecAddPiS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "error en componente %d\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $4096, %rsp
.cfi_def_cfa_offset 4104
orq $0, (%rsp)
subq $1976, %rsp
.cfi_def_cfa_offset 6080
movq %fs:40, %rax
movq %rax, 6056(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $2000, %esi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movl $2000, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $2000, %esi
call cudaMalloc@PLT
movl $0, %eax
.L12:
movl %eax, %edx
negl %edx
movl %edx, 48(%rsp,%rax,4)
leal (%rax,%rax,2), %edx
movl %edx, 2048(%rsp,%rax,4)
addq $1, %rax
cmpq $500, %rax
jne .L12
leaq 48(%rsp), %rsi
movl $1, %ecx
movl $2000, %edx
movq (%rsp), %rdi
call cudaMemcpy@PLT
leaq 2048(%rsp), %rsi
movl $1, %ecx
movl $2000, %edx
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $500, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L21
.L13:
leaq 4048(%rsp), %rdi
movl $2, %ecx
movl $2000, %edx
movq 16(%rsp), %rsi
call cudaMemcpy@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movl $0, %edx
leaq 4048(%rsp), %rcx
.L16:
movl 2048(%rsp,%rdx,4), %eax
addl 48(%rsp,%rdx,4), %eax
cmpl %eax, (%rcx,%rdx,4)
jne .L22
addq $1, %rdx
cmpq $500, %rdx
jne .L16
jmp .L15
.L21:
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z29__device_stub__Z6VecAddPiS_S_PiS_S_
jmp .L13
.L22:
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L15:
movq 6056(%rsp), %rax
subq %fs:40, %rax
jne .L23
movl $0, %eax
addq $6072, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z6VecAddPiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z6VecAddPiS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#define N 500
__global__ void VecAdd(int* DA, int* DB, int* DC)
{
int i = threadIdx.x;
DC[i] = DA[i] + DB[i];
}
int main()
{ int HA[N], HB[N], HC[N];
int *DA, *DB, *DC;
int i; int size = N*sizeof(int);
// reservamos espacio en la memoria global del device
cudaMalloc((void**)&DA, size);
cudaMalloc((void**)&DB, size);
cudaMalloc((void**)&DC, size);
// inicializamos HA y HB
for (i=0; i<N; i++) {HA[i]=-i; HB[i] = 3*i;}
// copiamos HA y HB del host a DA y DB en el device, respectivamente
cudaMemcpy(DA, HA, size, cudaMemcpyHostToDevice);
cudaMemcpy(DB, HB, size, cudaMemcpyHostToDevice);
// llamamos al kernel (1 bloque de N hilos)
VecAdd <<<1, N>>>(DA, DB, DC); // N hilos ejecutan el kernel en paralelo
// copiamos el resultado, que está en la memoria global del device, (DC) al host (a HC)
cudaMemcpy(HC, DC, size, cudaMemcpyDeviceToHost);
// liberamos la memoria reservada en el device
cudaFree(DA); cudaFree(DB); cudaFree(DC);
// una vez que tenemos los resultados en el host, comprobamos que son correctos
// esta comprobación debe quitarse una vez que el programa es correcto (p. ej., para medir el tiempo de ejecución)
for (i = 0; i < N; i++) // printf("%d + %d = %d\n",HA[i],HB[i],HC[i]);
if (HC[i]!= (HA[i]+HB[i]))
{printf("error en componente %d\n", i); break;}
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#define N 500
__global__ void VecAdd(int* DA, int* DB, int* DC)
{
int i = threadIdx.x;
DC[i] = DA[i] + DB[i];
}
int main()
{ int HA[N], HB[N], HC[N];
int *DA, *DB, *DC;
int i; int size = N*sizeof(int);
// reservamos espacio en la memoria global del device
hipMalloc((void**)&DA, size);
hipMalloc((void**)&DB, size);
hipMalloc((void**)&DC, size);
// inicializamos HA y HB
for (i=0; i<N; i++) {HA[i]=-i; HB[i] = 3*i;}
// copiamos HA y HB del host a DA y DB en el device, respectivamente
hipMemcpy(DA, HA, size, hipMemcpyHostToDevice);
hipMemcpy(DB, HB, size, hipMemcpyHostToDevice);
// llamamos al kernel (1 bloque de N hilos)
VecAdd <<<1, N>>>(DA, DB, DC); // N hilos ejecutan el kernel en paralelo
// copiamos el resultado, que está en la memoria global del device, (DC) al host (a HC)
hipMemcpy(HC, DC, size, hipMemcpyDeviceToHost);
// liberamos la memoria reservada en el device
hipFree(DA); hipFree(DB); hipFree(DC);
// una vez que tenemos los resultados en el host, comprobamos que son correctos
// esta comprobación debe quitarse una vez que el programa es correcto (p. ej., para medir el tiempo de ejecución)
for (i = 0; i < N; i++) // printf("%d + %d = %d\n",HA[i],HB[i],HC[i]);
if (HC[i]!= (HA[i]+HB[i]))
{printf("error en componente %d\n", i); break;}
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#define N 500
__global__ void VecAdd(int* DA, int* DB, int* DC)
{
int i = threadIdx.x;
DC[i] = DA[i] + DB[i];
}
int main()
{ int HA[N], HB[N], HC[N];
int *DA, *DB, *DC;
int i; int size = N*sizeof(int);
// reservamos espacio en la memoria global del device
hipMalloc((void**)&DA, size);
hipMalloc((void**)&DB, size);
hipMalloc((void**)&DC, size);
// inicializamos HA y HB
for (i=0; i<N; i++) {HA[i]=-i; HB[i] = 3*i;}
// copiamos HA y HB del host a DA y DB en el device, respectivamente
hipMemcpy(DA, HA, size, hipMemcpyHostToDevice);
hipMemcpy(DB, HB, size, hipMemcpyHostToDevice);
// llamamos al kernel (1 bloque de N hilos)
VecAdd <<<1, N>>>(DA, DB, DC); // N hilos ejecutan el kernel en paralelo
// copiamos el resultado, que está en la memoria global del device, (DC) al host (a HC)
hipMemcpy(HC, DC, size, hipMemcpyDeviceToHost);
// liberamos la memoria reservada en el device
hipFree(DA); hipFree(DB); hipFree(DC);
// una vez que tenemos los resultados en el host, comprobamos que son correctos
// esta comprobación debe quitarse una vez que el programa es correcto (p. ej., para medir el tiempo de ejecución)
for (i = 0; i < N; i++) // printf("%d + %d = %d\n",HA[i],HB[i],HC[i]);
if (HC[i]!= (HA[i]+HB[i]))
{printf("error en componente %d\n", i); break;}
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6VecAddPiS_S_
.globl _Z6VecAddPiS_S_
.p2align 8
.type _Z6VecAddPiS_S_,@function
_Z6VecAddPiS_S_:
s_load_b128 s[4:7], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 2, v0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b32 v1, v0, s[4:5]
global_load_b32 v2, v0, s[6:7]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6VecAddPiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6VecAddPiS_S_, .Lfunc_end0-_Z6VecAddPiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6VecAddPiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z6VecAddPiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#define N 500
__global__ void VecAdd(int* DA, int* DB, int* DC)
{
int i = threadIdx.x;
DC[i] = DA[i] + DB[i];
}
int main()
{ int HA[N], HB[N], HC[N];
int *DA, *DB, *DC;
int i; int size = N*sizeof(int);
// reservamos espacio en la memoria global del device
hipMalloc((void**)&DA, size);
hipMalloc((void**)&DB, size);
hipMalloc((void**)&DC, size);
// inicializamos HA y HB
for (i=0; i<N; i++) {HA[i]=-i; HB[i] = 3*i;}
// copiamos HA y HB del host a DA y DB en el device, respectivamente
hipMemcpy(DA, HA, size, hipMemcpyHostToDevice);
hipMemcpy(DB, HB, size, hipMemcpyHostToDevice);
// llamamos al kernel (1 bloque de N hilos)
VecAdd <<<1, N>>>(DA, DB, DC); // N hilos ejecutan el kernel en paralelo
// copiamos el resultado, que está en la memoria global del device, (DC) al host (a HC)
hipMemcpy(HC, DC, size, hipMemcpyDeviceToHost);
// liberamos la memoria reservada en el device
hipFree(DA); hipFree(DB); hipFree(DC);
// una vez que tenemos los resultados en el host, comprobamos que son correctos
// esta comprobación debe quitarse una vez que el programa es correcto (p. ej., para medir el tiempo de ejecución)
for (i = 0; i < N; i++) // printf("%d + %d = %d\n",HA[i],HB[i],HC[i]);
if (HC[i]!= (HA[i]+HB[i]))
{printf("error en componente %d\n", i); break;}
return 0;
} | .text
.file "suma-vectores1.hip"
.globl _Z21__device_stub__VecAddPiS_S_ # -- Begin function _Z21__device_stub__VecAddPiS_S_
.p2align 4, 0x90
.type _Z21__device_stub__VecAddPiS_S_,@function
_Z21__device_stub__VecAddPiS_S_: # @_Z21__device_stub__VecAddPiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6VecAddPiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z21__device_stub__VecAddPiS_S_, .Lfunc_end0-_Z21__device_stub__VecAddPiS_S_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $6104, %rsp # imm = 0x17D8
.cfi_def_cfa_offset 6112
leaq 16(%rsp), %rdi
movl $2000, %esi # imm = 0x7D0
callq hipMalloc
leaq 8(%rsp), %rdi
movl $2000, %esi # imm = 0x7D0
callq hipMalloc
movq %rsp, %rdi
movl $2000, %esi # imm = 0x7D0
callq hipMalloc
xorl %eax, %eax
xorl %ecx, %ecx
xorl %edx, %edx
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl %eax, 4096(%rsp,%rdx,4)
movl %ecx, 2096(%rsp,%rdx,4)
incq %rdx
addl $3, %ecx
decl %eax
cmpq $500, %rdx # imm = 0x1F4
jne .LBB1_1
# %bb.2:
movq 16(%rsp), %rdi
leaq 4096(%rsp), %rsi
movl $2000, %edx # imm = 0x7D0
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
leaq 2096(%rsp), %rsi
movl $2000, %edx # imm = 0x7D0
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 499(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z6VecAddPiS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq (%rsp), %rsi
leaq 96(%rsp), %rdi
movl $2000, %edx # imm = 0x7D0
movl $2, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
xorl %esi, %esi
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movl 2096(%rsp,%rsi,4), %eax
addl 4096(%rsp,%rsi,4), %eax
cmpl %eax, 96(%rsp,%rsi,4)
jne .LBB1_6
# %bb.7: # in Loop: Header=BB1_5 Depth=1
incq %rsi
cmpq $500, %rsi # imm = 0x1F4
jne .LBB1_5
jmp .LBB1_8
.LBB1_6:
movl $.L.str, %edi
# kill: def $esi killed $esi killed $rsi
xorl %eax, %eax
callq printf
.LBB1_8: # %.loopexit
xorl %eax, %eax
addq $6104, %rsp # imm = 0x17D8
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6VecAddPiS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6VecAddPiS_S_,@object # @_Z6VecAddPiS_S_
.section .rodata,"a",@progbits
.globl _Z6VecAddPiS_S_
.p2align 3, 0x0
_Z6VecAddPiS_S_:
.quad _Z21__device_stub__VecAddPiS_S_
.size _Z6VecAddPiS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "error en componente %d\n"
.size .L.str, 24
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6VecAddPiS_S_"
.size .L__unnamed_1, 16
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__VecAddPiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6VecAddPiS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z6VecAddPiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0207 */
/*0050*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0207 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*0090*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00a0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6VecAddPiS_S_
.globl _Z6VecAddPiS_S_
.p2align 8
.type _Z6VecAddPiS_S_,@function
_Z6VecAddPiS_S_:
s_load_b128 s[4:7], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 2, v0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b32 v1, v0, s[4:5]
global_load_b32 v2, v0, s[6:7]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6VecAddPiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6VecAddPiS_S_, .Lfunc_end0-_Z6VecAddPiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6VecAddPiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z6VecAddPiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00131469_00000000-6_suma-vectores1.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z29__device_stub__Z6VecAddPiS_S_PiS_S_
.type _Z29__device_stub__Z6VecAddPiS_S_PiS_S_, @function
_Z29__device_stub__Z6VecAddPiS_S_PiS_S_:
.LFB2082:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6VecAddPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z29__device_stub__Z6VecAddPiS_S_PiS_S_, .-_Z29__device_stub__Z6VecAddPiS_S_PiS_S_
.globl _Z6VecAddPiS_S_
.type _Z6VecAddPiS_S_, @function
_Z6VecAddPiS_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z6VecAddPiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z6VecAddPiS_S_, .-_Z6VecAddPiS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "error en componente %d\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $4096, %rsp
.cfi_def_cfa_offset 4104
orq $0, (%rsp)
subq $1976, %rsp
.cfi_def_cfa_offset 6080
movq %fs:40, %rax
movq %rax, 6056(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $2000, %esi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movl $2000, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $2000, %esi
call cudaMalloc@PLT
movl $0, %eax
.L12:
movl %eax, %edx
negl %edx
movl %edx, 48(%rsp,%rax,4)
leal (%rax,%rax,2), %edx
movl %edx, 2048(%rsp,%rax,4)
addq $1, %rax
cmpq $500, %rax
jne .L12
leaq 48(%rsp), %rsi
movl $1, %ecx
movl $2000, %edx
movq (%rsp), %rdi
call cudaMemcpy@PLT
leaq 2048(%rsp), %rsi
movl $1, %ecx
movl $2000, %edx
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $500, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L21
.L13:
leaq 4048(%rsp), %rdi
movl $2, %ecx
movl $2000, %edx
movq 16(%rsp), %rsi
call cudaMemcpy@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movl $0, %edx
leaq 4048(%rsp), %rcx
.L16:
movl 2048(%rsp,%rdx,4), %eax
addl 48(%rsp,%rdx,4), %eax
cmpl %eax, (%rcx,%rdx,4)
jne .L22
addq $1, %rdx
cmpq $500, %rdx
jne .L16
jmp .L15
.L21:
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z29__device_stub__Z6VecAddPiS_S_PiS_S_
jmp .L13
.L22:
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L15:
movq 6056(%rsp), %rax
subq %fs:40, %rax
jne .L23
movl $0, %eax
addq $6072, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z6VecAddPiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z6VecAddPiS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "suma-vectores1.hip"
.globl _Z21__device_stub__VecAddPiS_S_ # -- Begin function _Z21__device_stub__VecAddPiS_S_
.p2align 4, 0x90
.type _Z21__device_stub__VecAddPiS_S_,@function
_Z21__device_stub__VecAddPiS_S_: # @_Z21__device_stub__VecAddPiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6VecAddPiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z21__device_stub__VecAddPiS_S_, .Lfunc_end0-_Z21__device_stub__VecAddPiS_S_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $6104, %rsp # imm = 0x17D8
.cfi_def_cfa_offset 6112
leaq 16(%rsp), %rdi
movl $2000, %esi # imm = 0x7D0
callq hipMalloc
leaq 8(%rsp), %rdi
movl $2000, %esi # imm = 0x7D0
callq hipMalloc
movq %rsp, %rdi
movl $2000, %esi # imm = 0x7D0
callq hipMalloc
xorl %eax, %eax
xorl %ecx, %ecx
xorl %edx, %edx
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl %eax, 4096(%rsp,%rdx,4)
movl %ecx, 2096(%rsp,%rdx,4)
incq %rdx
addl $3, %ecx
decl %eax
cmpq $500, %rdx # imm = 0x1F4
jne .LBB1_1
# %bb.2:
movq 16(%rsp), %rdi
leaq 4096(%rsp), %rsi
movl $2000, %edx # imm = 0x7D0
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
leaq 2096(%rsp), %rsi
movl $2000, %edx # imm = 0x7D0
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 499(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z6VecAddPiS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq (%rsp), %rsi
leaq 96(%rsp), %rdi
movl $2000, %edx # imm = 0x7D0
movl $2, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
xorl %esi, %esi
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movl 2096(%rsp,%rsi,4), %eax
addl 4096(%rsp,%rsi,4), %eax
cmpl %eax, 96(%rsp,%rsi,4)
jne .LBB1_6
# %bb.7: # in Loop: Header=BB1_5 Depth=1
incq %rsi
cmpq $500, %rsi # imm = 0x1F4
jne .LBB1_5
jmp .LBB1_8
.LBB1_6:
movl $.L.str, %edi
# kill: def $esi killed $esi killed $rsi
xorl %eax, %eax
callq printf
.LBB1_8: # %.loopexit
xorl %eax, %eax
addq $6104, %rsp # imm = 0x17D8
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6VecAddPiS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6VecAddPiS_S_,@object # @_Z6VecAddPiS_S_
.section .rodata,"a",@progbits
.globl _Z6VecAddPiS_S_
.p2align 3, 0x0
_Z6VecAddPiS_S_:
.quad _Z21__device_stub__VecAddPiS_S_
.size _Z6VecAddPiS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "error en componente %d\n"
.size .L.str, 24
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6VecAddPiS_S_"
.size .L__unnamed_1, 16
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__VecAddPiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6VecAddPiS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void compute_distances(float * ref, int ref_width, int ref_pitch, float * query, int query_width, int query_pitch, int height, float * dist) {
// Declaration of the shared memory arrays As and Bs used to store the sub-matrix of A and B
__shared__ float shared_A[BLOCK_DIM][BLOCK_DIM];
__shared__ float shared_B[BLOCK_DIM][BLOCK_DIM];
// Sub-matrix of A (begin, step, end) and Sub-matrix of B (begin, step)
__shared__ int begin_A;
__shared__ int begin_B;
__shared__ int step_A;
__shared__ int step_B;
__shared__ int end_A;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Initializarion of the SSD for the current thread
float ssd = 0.f;
// Loop parameters
begin_A = BLOCK_DIM * blockIdx.y;
begin_B = BLOCK_DIM * blockIdx.x;
step_A = BLOCK_DIM * ref_pitch;
step_B = BLOCK_DIM * query_pitch;
end_A = begin_A + (height-1) * ref_pitch;
// Conditions
int cond0 = (begin_A + tx < ref_width); // used to write in shared memory
int cond1 = (begin_B + tx < query_width); // used to write in shared memory & to computations and to write in output array
int cond2 = (begin_A + ty < ref_width); // used to computations and to write in output matrix
// Loop over all the sub-matrices of A and B required to compute the block sub-matrix
for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) {
// Load the matrices from device memory to shared memory; each thread loads one element of each matrix
if (a/ref_pitch + ty < height) {
shared_A[ty][tx] = (cond0)? ref[a + ref_pitch * ty + tx] : 0;
shared_B[ty][tx] = (cond1)? query[b + query_pitch * ty + tx] : 0;
}
else {
shared_A[ty][tx] = 0;
shared_B[ty][tx] = 0;
}
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Compute the difference between the two matrixes; each thread computes one element of the block sub-matrix
if (cond2 && cond1) {
for (int k = 0; k < BLOCK_DIM; ++k){
float tmp = shared_A[k][ty] - shared_B[k][tx];
ssd += tmp*tmp;
}
}
// Synchronize to make sure that the preceeding computation is done before loading two new sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory; each thread writes one element
if (cond2 && cond1) {
dist[ (begin_A + ty) * query_pitch + begin_B + tx ] = ssd;
}
} | code for sm_80
Function : _Z17compute_distancesPfiiS_iiiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.Y ; /* 0x0000000000047919 */
/* 0x000e220000002600 */
/*0020*/ ULDC UR4, c[0x0][0x180] ; /* 0x0000600000047ab9 */
/* 0x000fe20000000800 */
/*0030*/ MOV R6, c[0x0][0x16c] ; /* 0x00005b0000067a02 */
/* 0x000fe20000000f00 */
/*0040*/ UIADD3 UR4, UR4, -0x1, URZ ; /* 0xffffffff04047890 */
/* 0x000fe2000fffe03f */
/*0050*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */
/* 0x000e620000002500 */
/*0060*/ ULDC UR5, c[0x0][0x16c] ; /* 0x00005b0000057ab9 */
/* 0x000fe20000000800 */
/*0070*/ MOV R7, c[0x0][0x17c] ; /* 0x00005f0000077a02 */
/* 0x000fe20000000f00 */
/*0080*/ UIMAD UR4, UR4, UR5, URZ ; /* 0x00000005040472a4 */
/* 0x000fe2000f8e023f */
/*0090*/ S2R R3, SR_TID.Y ; /* 0x0000000000037919 */
/* 0x000ea20000002200 */
/*00a0*/ IMAD.SHL.U32 R6, R6, 0x10, RZ ; /* 0x0000001006067824 */
/* 0x000fe200078e00ff */
/*00b0*/ SHF.L.U32 R7, R7, 0x4, RZ ; /* 0x0000000407077819 */
/* 0x000fe200000006ff */
/*00c0*/ HFMA2.MMA R15, -RZ, RZ, 0, 0 ; /* 0x00000000ff0f7435 */
/* 0x000fe200000001ff */
/*00d0*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000ee20000002100 */
/*00e0*/ ISETP.LE.AND P1, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fc4000bf23270 */
/*00f0*/ SHF.L.U32 R4, R4, 0x4, RZ ; /* 0x0000000404047819 */
/* 0x001fe400000006ff */
/*0100*/ SHF.L.U32 R5, R5, 0x4, RZ ; /* 0x0000000405057819 */
/* 0x002fe400000006ff */
/*0110*/ IADD3 R8, R4.reuse, UR4, RZ ; /* 0x0000000404087c10 */
/* 0x040fe2000fffe0ff */
/*0120*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0130*/ IADD3 R2, R4, R3, RZ ; /* 0x0000000304027210 */
/* 0x004fe20007ffe0ff */
/*0140*/ STS.128 [0x800], R4 ; /* 0x00080004ff007388 */
/* 0x0001e60000000c00 */
/*0150*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x168], PT ; /* 0x00005a0002007a0c */
/* 0x000fe20003f06270 */
/*0160*/ STS [0x810], R8 ; /* 0x00081008ff007388 */
/* 0x0001e20000000800 */
/*0170*/ IADD3 R2, R5, R0, RZ ; /* 0x0000000005027210 */
/* 0x008fc80007ffe0ff */
/*0180*/ ISETP.LT.AND P0, PT, R2, c[0x0][0x178], !P0 ; /* 0x00005e0002007a0c */
/* 0x000fe20004701270 */
/*0190*/ @!P1 BRA 0x990 ; /* 0x000007f000009947 */
/* 0x000fd80003800000 */
/*01a0*/ IABS R6, c[0x0][0x16c] ; /* 0x00005b0000067a13 */
/* 0x001fe20000000000 */
/*01b0*/ IMAD.MOV.U32 R15, RZ, RZ, RZ ; /* 0x000000ffff0f7224 */
/* 0x000fe200078e00ff */
/*01c0*/ IADD3 R8, R4, R0, RZ ; /* 0x0000000004087210 */
/* 0x000fe40007ffe0ff */
/*01d0*/ I2F.RP R10, R6 ; /* 0x00000006000a7306 */
/* 0x000e220000209400 */
/*01e0*/ SHF.L.U32 R12, R0, 0x2, RZ ; /* 0x00000002000c7819 */
/* 0x000fe400000006ff */
/*01f0*/ ISETP.GE.AND P1, PT, R8, c[0x0][0x168], PT ; /* 0x00005a0008007a0c */
/* 0x000fe40003f26270 */
/*0200*/ MOV R8, RZ ; /* 0x000000ff00087202 */
/* 0x000fc40000000f00 */
/*0210*/ MOV R7, R5 ; /* 0x0000000500077202 */
/* 0x000fe40000000f00 */
/*0220*/ LEA R5, R3.reuse, R12, 0x6 ; /* 0x0000000c03057211 */
/* 0x040fe200078e30ff */
/*0230*/ MUFU.RCP R10, R10 ; /* 0x0000000a000a7308 */
/* 0x001e240000001000 */
/*0240*/ IADD3 R9, R10, 0xffffffe, RZ ; /* 0x0ffffffe0a097810 */
/* 0x001fe20007ffe0ff */
/*0250*/ IMAD R10, R3, c[0x0][0x17c], R0 ; /* 0x00005f00030a7a24 */
/* 0x000fca00078e0200 */
/*0260*/ F2I.FTZ.U32.TRUNC.NTZ R9, R9 ; /* 0x0000000900097305 */
/* 0x000e24000021f000 */
/*0270*/ IMAD.MOV R11, RZ, RZ, -R9 ; /* 0x000000ffff0b7224 */
/* 0x001fc800078e0a09 */
/*0280*/ IMAD R11, R11, R6, RZ ; /* 0x000000060b0b7224 */
/* 0x000fc800078e02ff */
/*0290*/ IMAD.HI.U32 R8, R9, R11, R8 ; /* 0x0000000b09087227 */
/* 0x000fc800078e0008 */
/*02a0*/ IMAD R9, R3, c[0x0][0x16c], R0 ; /* 0x00005b0003097a24 */
/* 0x000fe400078e0200 */
/*02b0*/ IABS R13, R4 ; /* 0x00000004000d7213 */
/* 0x000fe20000000000 */
/*02c0*/ BSSY B0, 0x4d0 ; /* 0x0000020000007945 */
/* 0x000fe20003800000 */
/*02d0*/ IABS R14, c[0x0][0x16c] ; /* 0x00005b00000e7a13 */
/* 0x002fc60000000000 */
/*02e0*/ IMAD.HI.U32 R11, R8, R13, RZ ; /* 0x0000000d080b7227 */
/* 0x000fca00078e00ff */
/*02f0*/ IADD3 R12, -R11, RZ, RZ ; /* 0x000000ff0b0c7210 */
/* 0x000fca0007ffe1ff */
/*0300*/ IMAD R13, R14, R12, R13 ; /* 0x0000000c0e0d7224 */
/* 0x000fe200078e020d */
/*0310*/ LOP3.LUT R12, R4, c[0x0][0x16c], RZ, 0x3c, !PT ; /* 0x00005b00040c7a12 */
/* 0x000fc800078e3cff */
/*0320*/ ISETP.GT.U32.AND P4, PT, R6, R13, PT ; /* 0x0000000d0600720c */
/* 0x000fe40003f84070 */
/*0330*/ ISETP.GE.AND P3, PT, R12, RZ, PT ; /* 0x000000ff0c00720c */
/* 0x000fd60003f66270 */
/*0340*/ @!P4 IADD3 R13, R13, -R14, RZ ; /* 0x8000000e0d0dc210 */
/* 0x000fe40007ffe0ff */
/*0350*/ @!P4 IADD3 R11, R11, 0x1, RZ ; /* 0x000000010b0bc810 */
/* 0x000fe40007ffe0ff */
/*0360*/ ISETP.GE.U32.AND P2, PT, R13, R6, PT ; /* 0x000000060d00720c */
/* 0x000fe40003f46070 */
/*0370*/ ISETP.NE.AND P4, PT, RZ, c[0x0][0x16c], PT ; /* 0x00005b00ff007a0c */
/* 0x000fd60003f85270 */
/*0380*/ @P2 IADD3 R11, R11, 0x1, RZ ; /* 0x000000010b0b2810 */
/* 0x000fc80007ffe0ff */
/*0390*/ @!P3 IADD3 R11, -R11, RZ, RZ ; /* 0x000000ff0b0bb210 */
/* 0x000fe40007ffe1ff */
/*03a0*/ @!P4 LOP3.LUT R11, RZ, c[0x0][0x16c], RZ, 0x33, !PT ; /* 0x00005b00ff0bca12 */
/* 0x000fc800078e33ff */
/*03b0*/ IADD3 R11, R11, R3, RZ ; /* 0x000000030b0b7210 */
/* 0x000fc80007ffe0ff */
/*03c0*/ ISETP.GE.AND P2, PT, R11, c[0x0][0x180], PT ; /* 0x000060000b007a0c */
/* 0x000fda0003f46270 */
/*03d0*/ @P2 STS [R5], RZ ; /* 0x000000ff05002388 */
/* 0x0011e20000000800 */
/*03e0*/ @P2 IMAD.MOV.U32 R18, RZ, RZ, RZ ; /* 0x000000ffff122224 */
/* 0x000fe200078e00ff */
/*03f0*/ @P2 BRA 0x4c0 ; /* 0x000000c000002947 */
/* 0x000fea0003800000 */
/*0400*/ @!P1 IADD3 R12, R9, R4, RZ ; /* 0x00000004090c9210 */
/* 0x000fe20007ffe0ff */
/*0410*/ HFMA2.MMA R14, -RZ, RZ, 0, 0 ; /* 0x00000000ff0e7435 */
/* 0x000fe200000001ff */
/*0420*/ @!P1 MOV R13, 0x4 ; /* 0x00000004000d9802 */
/* 0x000fe40000000f00 */
/*0430*/ ISETP.GE.AND P2, PT, R2, c[0x0][0x178], PT ; /* 0x00005e0002007a0c */
/* 0x000fc60003f46270 */
/*0440*/ @!P1 IMAD.WIDE R12, R12, R13, c[0x0][0x160] ; /* 0x000058000c0c9625 */
/* 0x000fca00078e020d */
/*0450*/ @!P1 LDG.E R14, [R12.64] ; /* 0x000000040c0e9981 */
/* 0x000ea2000c1e1900 */
/*0460*/ IMAD.MOV.U32 R18, RZ, RZ, RZ ; /* 0x000000ffff127224 */
/* 0x000fc800078e00ff */
/*0470*/ @!P2 IADD3 R16, R10, R7, RZ ; /* 0x000000070a10a210 */
/* 0x000fe40007ffe0ff */
/*0480*/ @!P2 MOV R17, 0x4 ; /* 0x000000040011a802 */
/* 0x000fca0000000f00 */
/*0490*/ @!P2 IMAD.WIDE R16, R16, R17, c[0x0][0x170] ; /* 0x00005c001010a625 */
/* 0x000fca00078e0211 */
/*04a0*/ @!P2 LDG.E R18, [R16.64] ; /* 0x000000041012a981 */
/* 0x000368000c1e1900 */
/*04b0*/ STS [R5], R14 ; /* 0x0000000e05007388 */
/* 0x0043e40000000800 */
/*04c0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*04d0*/ STS [R5+0x400], R18 ; /* 0x0004001205007388 */
/* 0x0205e20000000800 */
/*04e0*/ BSSY B0, 0x920 ; /* 0x0000043000007945 */
/* 0x000fe60003800000 */
/*04f0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0500*/ @!P0 BRA 0x910 ; /* 0x0000040000008947 */
/* 0x000fea0003800000 */
/*0510*/ LDS R13, [R3.X4] ; /* 0x00000000030d7984 */
/* 0x004fe80000004800 */
/*0520*/ LDS R14, [R0.X4+0x400] ; /* 0x00040000000e7984 */
/* 0x002e680000004800 */
/*0530*/ LDS R26, [R0.X4+0x440] ; /* 0x00044000001a7984 */
/* 0x000fe80000004800 */
/*0540*/ LDS R27, [R3.X4+0x40] ; /* 0x00004000031b7984 */
/* 0x000ea80000004800 */
/*0550*/ LDS R17, [R0.X4+0x480] ; /* 0x0004800000117984 */
/* 0x000fe80000004800 */
/*0560*/ LDS R18, [R3.X4+0x80] ; /* 0x0000800003127984 */
/* 0x000ee80000004800 */
/*0570*/ LDS R23, [R0.X4+0x4c0] ; /* 0x0004c00000177984 */
/* 0x000fe80000004800 */
/*0580*/ LDS R24, [R3.X4+0xc0] ; /* 0x0000c00003187984 */
/* 0x000f280000004800 */
/*0590*/ LDS R19, [R0.X4+0x500] ; /* 0x0005000000137984 */
/* 0x000fe80000004800 */
/*05a0*/ LDS R20, [R3.X4+0x100] ; /* 0x0001000003147984 */
/* 0x000f680000004800 */
/*05b0*/ LDS R21, [R0.X4+0x540] ; /* 0x0005400000157984 */
/* 0x000fe80000004800 */
/*05c0*/ LDS R22, [R3.X4+0x140] ; /* 0x0001400003167984 */
/* 0x000e220000004800 */
/*05d0*/ FADD R16, R13, -R14 ; /* 0x8000000e0d107221 */
/* 0x002fc60000000000 */
/*05e0*/ LDS R11, [R0.X4+0x580] ; /* 0x00058000000b7984 */
/* 0x000fe20000004800 */
/*05f0*/ FFMA R25, R16, R16, R15 ; /* 0x0000001010197223 */
/* 0x000fc6000000000f */
/*0600*/ LDS R12, [R3.X4+0x180] ; /* 0x00018000030c7984 */
/* 0x000e620000004800 */
/*0610*/ FADD R26, -R26, R27 ; /* 0x0000001b1a1a7221 */
/* 0x004fc60000000100 */
/*0620*/ LDS R13, [R0.X4+0x5c0] ; /* 0x0005c000000d7984 */
/* 0x000fe20000004800 */
/*0630*/ FFMA R25, R26, R26, R25 ; /* 0x0000001a1a197223 */
/* 0x000fc60000000019 */
/*0640*/ LDS R14, [R3.X4+0x1c0] ; /* 0x0001c000030e7984 */
/* 0x000ea20000004800 */
/*0650*/ FADD R18, -R17, R18 ; /* 0x0000001211127221 */
/* 0x008fc60000000100 */
/*0660*/ LDS R15, [R0.X4+0x600] ; /* 0x00060000000f7984 */
/* 0x000fe20000004800 */
/*0670*/ FFMA R25, R18, R18, R25 ; /* 0x0000001212197223 */
/* 0x000fc60000000019 */
/*0680*/ LDS R16, [R3.X4+0x200] ; /* 0x0002000003107984 */
/* 0x000ee20000004800 */
/*0690*/ FADD R24, -R23, R24 ; /* 0x0000001817187221 */
/* 0x010fc60000000100 */
/*06a0*/ LDS R18, [R0.X4+0x640] ; /* 0x0006400000127984 */
/* 0x000fe20000004800 */
/*06b0*/ FFMA R25, R24, R24, R25 ; /* 0x0000001818197223 */
/* 0x000fe40000000019 */
/*06c0*/ FADD R24, -R19, R20 ; /* 0x0000001413187221 */
/* 0x020fe20000000100 */
/*06d0*/ LDS R17, [R3.X4+0x240] ; /* 0x0002400003117984 */
/* 0x000f260000004800 */
/*06e0*/ FFMA R25, R24, R24, R25 ; /* 0x0000001818197223 */
/* 0x000fe20000000019 */
/*06f0*/ LDS R20, [R0.X4+0x680] ; /* 0x0006800000147984 */
/* 0x000fe20000004800 */
/*0700*/ FADD R24, -R21, R22 ; /* 0x0000001615187221 */
/* 0x001fc60000000100 */
/*0710*/ LDS R19, [R3.X4+0x280] ; /* 0x0002800003137984 */
/* 0x000e220000004800 */
/*0720*/ FFMA R25, R24, R24, R25 ; /* 0x0000001818197223 */
/* 0x000fc60000000019 */
/*0730*/ LDS R22, [R0.X4+0x6c0] ; /* 0x0006c00000167984 */
/* 0x000fe20000004800 */
/*0740*/ FADD R24, -R11, R12 ; /* 0x0000000c0b187221 */
/* 0x002fc60000000100 */
/*0750*/ LDS R21, [R3.X4+0x2c0] ; /* 0x0002c00003157984 */
/* 0x000e620000004800 */
/*0760*/ FFMA R25, R24, R24, R25 ; /* 0x0000001818197223 */
/* 0x000fc60000000019 */
/*0770*/ LDS R11, [R0.X4+0x700] ; /* 0x00070000000b7984 */
/* 0x000fe20000004800 */
/*0780*/ FADD R24, -R13, R14 ; /* 0x0000000e0d187221 */
/* 0x004fc60000000100 */
/*0790*/ LDS R12, [R3.X4+0x300] ; /* 0x00030000030c7984 */
/* 0x000ea20000004800 */
/*07a0*/ FFMA R25, R24, R24, R25 ; /* 0x0000001818197223 */
/* 0x000fc60000000019 */
/*07b0*/ LDS R14, [R0.X4+0x740] ; /* 0x00074000000e7984 */
/* 0x000fe20000004800 */
/*07c0*/ FADD R26, -R15, R16 ; /* 0x000000100f1a7221 */
/* 0x008fc60000000100 */
/*07d0*/ LDS R13, [R3.X4+0x340] ; /* 0x00034000030d7984 */
/* 0x000ee20000004800 */
/*07e0*/ FFMA R25, R26, R26, R25 ; /* 0x0000001a1a197223 */
/* 0x000fc60000000019 */
/*07f0*/ LDS R24, [R0.X4+0x780] ; /* 0x0007800000187984 */
/* 0x000fe80000004800 */
/*0800*/ LDS R23, [R3.X4+0x380] ; /* 0x0003800003177984 */
/* 0x000f620000004800 */
/*0810*/ FADD R18, -R18, R17 ; /* 0x0000001112127221 */
/* 0x010fc60000000100 */
/*0820*/ LDS R16, [R0.X4+0x7c0] ; /* 0x0007c00000107984 */
/* 0x000fe20000004800 */
/*0830*/ FFMA R25, R18, R18, R25 ; /* 0x0000001212197223 */
/* 0x000fc60000000019 */
/*0840*/ LDS R15, [R3.X4+0x3c0] ; /* 0x0003c000030f7984 */
/* 0x000f220000004800 */
/*0850*/ FADD R20, -R20, R19 ; /* 0x0000001314147221 */
/* 0x001fc80000000100 */
/*0860*/ FFMA R25, R20, R20, R25 ; /* 0x0000001414197223 */
/* 0x000fe40000000019 */
/*0870*/ FADD R22, -R22, R21 ; /* 0x0000001516167221 */
/* 0x002fc80000000100 */
/*0880*/ FFMA R25, R22, R22, R25 ; /* 0x0000001616197223 */
/* 0x000fe40000000019 */
/*0890*/ FADD R12, -R11, R12 ; /* 0x0000000c0b0c7221 */
/* 0x004fc80000000100 */
/*08a0*/ FFMA R25, R12, R12, R25 ; /* 0x0000000c0c197223 */
/* 0x000fe40000000019 */
/*08b0*/ FADD R14, -R14, R13 ; /* 0x0000000d0e0e7221 */
/* 0x008fc80000000100 */
/*08c0*/ FFMA R25, R14, R14, R25 ; /* 0x0000000e0e197223 */
/* 0x000fe40000000019 */
/*08d0*/ FADD R24, -R24, R23 ; /* 0x0000001718187221 */
/* 0x020fc80000000100 */
/*08e0*/ FFMA R25, R24, R24, R25 ; /* 0x0000001818197223 */
/* 0x000fe40000000019 */
/*08f0*/ FADD R16, -R16, R15 ; /* 0x0000000f10107221 */
/* 0x010fc80000000100 */
/*0900*/ FFMA R15, R16, R16, R25 ; /* 0x00000010100f7223 */
/* 0x000fe40000000019 */
/*0910*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x004fea0003800000 */
/*0920*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0930*/ LDS.64 R12, [0x808] ; /* 0x00080800ff0c7984 */
/* 0x000ea80000000a00 */
/*0940*/ LDS R11, [0x810] ; /* 0x00081000ff0b7984 */
/* 0x000ee20000000800 */
/*0950*/ IADD3 R4, R12, R4, RZ ; /* 0x000000040c047210 */
/* 0x004fc40007ffe0ff */
/*0960*/ IADD3 R7, R7, R13, RZ ; /* 0x0000000d07077210 */
/* 0x000fe40007ffe0ff */
/*0970*/ ISETP.GT.AND P2, PT, R4, R11, PT ; /* 0x0000000b0400720c */
/* 0x008fda0003f44270 */
/*0980*/ @!P2 BRA 0x2b0 ; /* 0xfffff9200000a947 */
/* 0x000fea000383ffff */
/*0990*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*09a0*/ LDS.64 R4, [0x800] ; /* 0x00080000ff047984 */
/* 0x003e220000000a00 */
/*09b0*/ HFMA2.MMA R2, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff027435 */
/* 0x000fe200000001ff */
/*09c0*/ IADD3 R3, R3, R4, RZ ; /* 0x0000000403037210 */
/* 0x001fe40007ffe0ff */
/*09d0*/ IADD3 R0, R0, R5, RZ ; /* 0x0000000500007210 */
/* 0x000fca0007ffe0ff */
/*09e0*/ IMAD R3, R3, c[0x0][0x17c], R0 ; /* 0x00005f0003037a24 */
/* 0x000fc800078e0200 */
/*09f0*/ IMAD.WIDE R2, R3, R2, c[0x0][0x188] ; /* 0x0000620003027625 */
/* 0x000fca00078e0202 */
/*0a00*/ STG.E [R2.64], R15 ; /* 0x0000000f02007986 */
/* 0x000fe2000c101904 */
/*0a10*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0a20*/ BRA 0xa20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0a30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0aa0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ab0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ac0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ad0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ae0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0af0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void compute_distances(float * ref, int ref_width, int ref_pitch, float * query, int query_width, int query_pitch, int height, float * dist) {
// Declaration of the shared memory arrays As and Bs used to store the sub-matrix of A and B
__shared__ float shared_A[BLOCK_DIM][BLOCK_DIM];
__shared__ float shared_B[BLOCK_DIM][BLOCK_DIM];
// Sub-matrix of A (begin, step, end) and Sub-matrix of B (begin, step)
__shared__ int begin_A;
__shared__ int begin_B;
__shared__ int step_A;
__shared__ int step_B;
__shared__ int end_A;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Initializarion of the SSD for the current thread
float ssd = 0.f;
// Loop parameters
begin_A = BLOCK_DIM * blockIdx.y;
begin_B = BLOCK_DIM * blockIdx.x;
step_A = BLOCK_DIM * ref_pitch;
step_B = BLOCK_DIM * query_pitch;
end_A = begin_A + (height-1) * ref_pitch;
// Conditions
int cond0 = (begin_A + tx < ref_width); // used to write in shared memory
int cond1 = (begin_B + tx < query_width); // used to write in shared memory & to computations and to write in output array
int cond2 = (begin_A + ty < ref_width); // used to computations and to write in output matrix
// Loop over all the sub-matrices of A and B required to compute the block sub-matrix
for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) {
// Load the matrices from device memory to shared memory; each thread loads one element of each matrix
if (a/ref_pitch + ty < height) {
shared_A[ty][tx] = (cond0)? ref[a + ref_pitch * ty + tx] : 0;
shared_B[ty][tx] = (cond1)? query[b + query_pitch * ty + tx] : 0;
}
else {
shared_A[ty][tx] = 0;
shared_B[ty][tx] = 0;
}
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Compute the difference between the two matrixes; each thread computes one element of the block sub-matrix
if (cond2 && cond1) {
for (int k = 0; k < BLOCK_DIM; ++k){
float tmp = shared_A[k][ty] - shared_B[k][tx];
ssd += tmp*tmp;
}
}
// Synchronize to make sure that the preceeding computation is done before loading two new sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory; each thread writes one element
if (cond2 && cond1) {
dist[ (begin_A + ty) * query_pitch + begin_B + tx ] = ssd;
}
} | .file "tmpxft_00130023_00000000-6_compute_distances.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z46__device_stub__Z17compute_distancesPfiiS_iiiS_PfiiS_iiiS_
.type _Z46__device_stub__Z17compute_distancesPfiiS_iiiS_PfiiS_iiiS_, @function
_Z46__device_stub__Z17compute_distancesPfiiS_iiiS_PfiiS_iiiS_:
.LFB2051:
.cfi_startproc
endbr64
subq $200, %rsp
.cfi_def_cfa_offset 208
movq %rdi, 40(%rsp)
movl %esi, 36(%rsp)
movl %edx, 32(%rsp)
movq %rcx, 24(%rsp)
movl %r8d, 20(%rsp)
movl %r9d, 16(%rsp)
movq 216(%rsp), %rax
movq %rax, 8(%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 36(%rsp), %rax
movq %rax, 120(%rsp)
leaq 32(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 20(%rsp), %rax
movq %rax, 144(%rsp)
leaq 16(%rsp), %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 8(%rsp), %rax
movq %rax, 168(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z17compute_distancesPfiiS_iiiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z46__device_stub__Z17compute_distancesPfiiS_iiiS_PfiiS_iiiS_, .-_Z46__device_stub__Z17compute_distancesPfiiS_iiiS_PfiiS_iiiS_
.globl _Z17compute_distancesPfiiS_iiiS_
.type _Z17compute_distancesPfiiS_iiiS_, @function
_Z17compute_distancesPfiiS_iiiS_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pushq 24(%rsp)
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z46__device_stub__Z17compute_distancesPfiiS_iiiS_PfiiS_iiiS_
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z17compute_distancesPfiiS_iiiS_, .-_Z17compute_distancesPfiiS_iiiS_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z17compute_distancesPfiiS_iiiS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z17compute_distancesPfiiS_iiiS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
// Computes the squared Euclidean distance between every reference point and
// every query point, producing one BLOCK_DIM x BLOCK_DIM output tile per
// thread block.
//
// Layout (as established by the indexing below): points are stored
// column-wise — element [d * pitch + p] is coordinate d of point p — so
// `height` is the point dimensionality and `ref_width`/`query_width` are the
// point counts. `ref_pitch`/`query_pitch` are row strides in elements (not
// bytes). The result dist[(ref point) * query_pitch + (query point)] is the
// squared distance; no sqrt is applied here. dist is indexed with query's
// pitch, so it is assumed to share that allocation pitch.
//
// Launch contract: assumes blockDim == (BLOCK_DIM, BLOCK_DIM), with
// blockIdx.y tiling the reference points and blockIdx.x the query points
// — TODO confirm at the call site.
__global__ void compute_distances(float * ref, int ref_width, int ref_pitch, float * query, int query_width, int query_pitch, int height, float * dist) {
// Shared-memory tiles holding the current BLOCK_DIM x BLOCK_DIM slab of each input.
// NOTE(review): shared_A is read column-wise below (shared_A[k][ty]); padding the
// inner dimension to BLOCK_DIM+1 would avoid shared-memory bank conflicts — a
// performance-only change, deliberately not made here.
__shared__ float shared_A[BLOCK_DIM][BLOCK_DIM];
__shared__ float shared_B[BLOCK_DIM][BLOCK_DIM];
// Tile-walk bounds for A (begin, step, end) and B (begin, step). Every thread
// of the block stores the same value into each scalar before the first
// barrier, so the concurrent writes are benign; per-thread locals would work
// equally well.
__shared__ int begin_A;
__shared__ int begin_B;
__shared__ int step_A;
__shared__ int step_B;
__shared__ int end_A;
// Thread index within the block
int tx = threadIdx.x;
int ty = threadIdx.y;
// Running sum of squared differences for this (ref, query) pair
float ssd = 0.f;
// First column of this block's ref/query tiles, and the per-slab strides
// (one slab = BLOCK_DIM rows, i.e. BLOCK_DIM point dimensions).
begin_A = BLOCK_DIM * blockIdx.y;
begin_B = BLOCK_DIM * blockIdx.x;
step_A = BLOCK_DIM * ref_pitch;
step_B = BLOCK_DIM * query_pitch;
end_A = begin_A + (height-1) * ref_pitch;
// Bounds guards for the ragged final tiles:
int cond0 = (begin_A + tx < ref_width); // this thread's ref column exists (guards the shared_A load)
int cond1 = (begin_B + tx < query_width); // this thread's query column exists (guards the shared_B load, the compute, and the output store)
int cond2 = (begin_A + ty < ref_width); // this thread's ref row of the output tile exists (guards the compute and the output store)
// Walk both inputs one BLOCK_DIM-row slab at a time.
for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) {
// Stage one slab of each input into shared memory. Lanes past the last
// point or past the last dimension store 0; those zeros only feed
// (ref, query) pairs whose result is never written.
if (a/ref_pitch + ty < height) {
shared_A[ty][tx] = (cond0)? ref[a + ref_pitch * ty + tx] : 0;
shared_B[ty][tx] = (cond1)? query[b + query_pitch * ty + tx] : 0;
}
else {
shared_A[ty][tx] = 0;
shared_B[ty][tx] = 0;
}
// All loads must land before any thread reads the tiles.
__syncthreads();
// Accumulate this slab's contribution: for each of the BLOCK_DIM
// dimensions k, compare ref column ty against query column tx.
if (cond2 && cond1) {
for (int k = 0; k < BLOCK_DIM; ++k){
float tmp = shared_A[k][ty] - shared_B[k][tx];
ssd += tmp*tmp;
}
}
// Outside the divergent branch above, so every thread reaches it; keeps
// the next iteration from overwriting tiles that are still being read.
__syncthreads();
}
// One store per valid (ref, query) pair; dist uses query_pitch as row stride.
if (cond2 && cond1) {
dist[ (begin_A + ty) * query_pitch + begin_B + tx ] = ssd;
}
}
#include "includes.h"
// Computes the squared Euclidean distance between every reference point and
// every query point, producing one BLOCK_DIM x BLOCK_DIM output tile per
// thread block.
//
// Layout (as established by the indexing below): points are stored
// column-wise — element [d * pitch + p] is coordinate d of point p — so
// `height` is the point dimensionality and `ref_width`/`query_width` are the
// point counts. `ref_pitch`/`query_pitch` are row strides in elements (not
// bytes). The result dist[(ref point) * query_pitch + (query point)] is the
// squared distance; no sqrt is applied here. dist is indexed with query's
// pitch, so it is assumed to share that allocation pitch.
//
// Launch contract: assumes blockDim == (BLOCK_DIM, BLOCK_DIM), with
// blockIdx.y tiling the reference points and blockIdx.x the query points
// — TODO confirm at the call site.
__global__ void compute_distances(float * ref, int ref_width, int ref_pitch, float * query, int query_width, int query_pitch, int height, float * dist) {
// Shared-memory (LDS) tiles holding the current BLOCK_DIM x BLOCK_DIM slab of each input.
// NOTE(review): shared_A is read column-wise below (shared_A[k][ty]); padding the
// inner dimension to BLOCK_DIM+1 would avoid shared-memory bank conflicts — a
// performance-only change, deliberately not made here.
__shared__ float shared_A[BLOCK_DIM][BLOCK_DIM];
__shared__ float shared_B[BLOCK_DIM][BLOCK_DIM];
// Tile-walk bounds for A (begin, step, end) and B (begin, step). Every thread
// of the block stores the same value into each scalar before the first
// barrier, so the concurrent writes are benign; per-thread locals would work
// equally well.
__shared__ int begin_A;
__shared__ int begin_B;
__shared__ int step_A;
__shared__ int step_B;
__shared__ int end_A;
// Thread index within the block
int tx = threadIdx.x;
int ty = threadIdx.y;
// Running sum of squared differences for this (ref, query) pair
float ssd = 0.f;
// First column of this block's ref/query tiles, and the per-slab strides
// (one slab = BLOCK_DIM rows, i.e. BLOCK_DIM point dimensions).
begin_A = BLOCK_DIM * blockIdx.y;
begin_B = BLOCK_DIM * blockIdx.x;
step_A = BLOCK_DIM * ref_pitch;
step_B = BLOCK_DIM * query_pitch;
end_A = begin_A + (height-1) * ref_pitch;
// Bounds guards for the ragged final tiles:
int cond0 = (begin_A + tx < ref_width); // this thread's ref column exists (guards the shared_A load)
int cond1 = (begin_B + tx < query_width); // this thread's query column exists (guards the shared_B load, the compute, and the output store)
int cond2 = (begin_A + ty < ref_width); // this thread's ref row of the output tile exists (guards the compute and the output store)
// Walk both inputs one BLOCK_DIM-row slab at a time.
for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) {
// Stage one slab of each input into shared memory. Lanes past the last
// point or past the last dimension store 0; those zeros only feed
// (ref, query) pairs whose result is never written.
if (a/ref_pitch + ty < height) {
shared_A[ty][tx] = (cond0)? ref[a + ref_pitch * ty + tx] : 0;
shared_B[ty][tx] = (cond1)? query[b + query_pitch * ty + tx] : 0;
}
else {
shared_A[ty][tx] = 0;
shared_B[ty][tx] = 0;
}
// All loads must land before any thread reads the tiles.
__syncthreads();
// Accumulate this slab's contribution: for each of the BLOCK_DIM
// dimensions k, compare ref column ty against query column tx.
if (cond2 && cond1) {
for (int k = 0; k < BLOCK_DIM; ++k){
float tmp = shared_A[k][ty] - shared_B[k][tx];
ssd += tmp*tmp;
}
}
// Outside the divergent branch above, so every thread reaches it; keeps
// the next iteration from overwriting tiles that are still being read.
__syncthreads();
}
// One store per valid (ref, query) pair; dist uses query_pitch as row stride.
if (cond2 && cond1) {
dist[ (begin_A + ty) * query_pitch + begin_B + tx ] = ssd;
}
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
// Computes the squared Euclidean distance between every reference point and
// every query point, producing one BLOCK_DIM x BLOCK_DIM output tile per
// thread block.
//
// Layout (as established by the indexing below): points are stored
// column-wise — element [d * pitch + p] is coordinate d of point p — so
// `height` is the point dimensionality and `ref_width`/`query_width` are the
// point counts. `ref_pitch`/`query_pitch` are row strides in elements (not
// bytes). The result dist[(ref point) * query_pitch + (query point)] is the
// squared distance; no sqrt is applied here. dist is indexed with query's
// pitch, so it is assumed to share that allocation pitch.
//
// Launch contract: assumes blockDim == (BLOCK_DIM, BLOCK_DIM), with
// blockIdx.y tiling the reference points and blockIdx.x the query points
// — TODO confirm at the call site.
__global__ void compute_distances(float * ref, int ref_width, int ref_pitch, float * query, int query_width, int query_pitch, int height, float * dist) {
// Shared-memory (LDS) tiles holding the current BLOCK_DIM x BLOCK_DIM slab of each input.
// NOTE(review): shared_A is read column-wise below (shared_A[k][ty]); padding the
// inner dimension to BLOCK_DIM+1 would avoid shared-memory bank conflicts — a
// performance-only change, deliberately not made here.
__shared__ float shared_A[BLOCK_DIM][BLOCK_DIM];
__shared__ float shared_B[BLOCK_DIM][BLOCK_DIM];
// Tile-walk bounds for A (begin, step, end) and B (begin, step). Every thread
// of the block stores the same value into each scalar before the first
// barrier, so the concurrent writes are benign; per-thread locals would work
// equally well.
__shared__ int begin_A;
__shared__ int begin_B;
__shared__ int step_A;
__shared__ int step_B;
__shared__ int end_A;
// Thread index within the block
int tx = threadIdx.x;
int ty = threadIdx.y;
// Running sum of squared differences for this (ref, query) pair
float ssd = 0.f;
// First column of this block's ref/query tiles, and the per-slab strides
// (one slab = BLOCK_DIM rows, i.e. BLOCK_DIM point dimensions).
begin_A = BLOCK_DIM * blockIdx.y;
begin_B = BLOCK_DIM * blockIdx.x;
step_A = BLOCK_DIM * ref_pitch;
step_B = BLOCK_DIM * query_pitch;
end_A = begin_A + (height-1) * ref_pitch;
// Bounds guards for the ragged final tiles:
int cond0 = (begin_A + tx < ref_width); // this thread's ref column exists (guards the shared_A load)
int cond1 = (begin_B + tx < query_width); // this thread's query column exists (guards the shared_B load, the compute, and the output store)
int cond2 = (begin_A + ty < ref_width); // this thread's ref row of the output tile exists (guards the compute and the output store)
// Walk both inputs one BLOCK_DIM-row slab at a time.
for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) {
// Stage one slab of each input into shared memory. Lanes past the last
// point or past the last dimension store 0; those zeros only feed
// (ref, query) pairs whose result is never written.
if (a/ref_pitch + ty < height) {
shared_A[ty][tx] = (cond0)? ref[a + ref_pitch * ty + tx] : 0;
shared_B[ty][tx] = (cond1)? query[b + query_pitch * ty + tx] : 0;
}
else {
shared_A[ty][tx] = 0;
shared_B[ty][tx] = 0;
}
// All loads must land before any thread reads the tiles.
__syncthreads();
// Accumulate this slab's contribution: for each of the BLOCK_DIM
// dimensions k, compare ref column ty against query column tx.
if (cond2 && cond1) {
for (int k = 0; k < BLOCK_DIM; ++k){
float tmp = shared_A[k][ty] - shared_B[k][tx];
ssd += tmp*tmp;
}
}
// Outside the divergent branch above, so every thread reaches it; keeps
// the next iteration from overwriting tiles that are still being read.
__syncthreads();
}
// One store per valid (ref, query) pair; dist uses query_pitch as row stride.
if (cond2 && cond1) {
dist[ (begin_A + ty) * query_pitch + begin_B + tx ] = ssd;
}
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
; Machine-generated gfx1100 (RDNA3) ISA for the HIP kernel compute_distances.
; Do not hand-edit: regenerate from the HIP source with hipcc/clang.
; s[0:1] = kernarg segment pointer; the ds_* instructions below access the
; kernel's 2068-byte LDS block (the shared tiles plus the loop scalars).
.protected _Z17compute_distancesPfiiS_iiiS_
.globl _Z17compute_distancesPfiiS_iiiS_
.p2align 8
.type _Z17compute_distancesPfiiS_iiiS_,@function
_Z17compute_distancesPfiiS_iiiS_:
; prologue: load kernel args, compute tile origins, store the shared
; loop scalars to LDS, and evaluate the tile-bound predicates.
s_clause 0x2
s_load_b64 s[10:11], s[0:1], 0x8
s_load_b32 s12, s[0:1], 0x20
s_load_b64 s[4:5], s[0:1], 0x18
v_dual_mov_b32 v4, 0 :: v_dual_and_b32 v1, 0x3ff, v0
s_movk_i32 s2, 0x800
s_movk_i32 s3, 0x800
v_bfe_u32 v0, v0, 10, 10
s_lshl_b32 s13, s15, 4
s_lshl_b32 s16, s14, 4
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
v_dual_mov_b32 v2, s13 :: v_dual_mov_b32 v3, s16
v_add_nc_u32_e32 v7, s16, v1
ds_store_b32 v4, v3 offset:2064
s_waitcnt lgkmcnt(0)
s_lshl_b32 s6, s5, 4
v_dual_mov_b32 v10, s6 :: v_dual_add_nc_u32 v5, s2, v4
s_lshl_b32 s2, s11, 4
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
v_dual_mov_b32 v9, s2 :: v_dual_add_nc_u32 v6, s3, v4
s_add_i32 s3, s12, -1
v_cmp_gt_i32_e64 s2, s4, v7
s_mul_i32 s3, s3, s11
s_add_i32 s7, s3, s13
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v11, s7 :: v_dual_add_nc_u32 v8, s13, v0
s_cmp_gt_i32 s13, s7
ds_store_2addr_b32 v5, v10, v2 offset0:2 offset1:3
ds_store_2addr_b32 v6, v11, v9 offset1:1
v_cmp_gt_i32_e64 s3, s10, v8
s_cbranch_scc1 .LBB0_14
s_ashr_i32 s14, s11, 31
s_clause 0x1
s_load_b64 s[6:7], s[0:1], 0x0
s_load_b64 s[8:9], s[0:1], 0x10
s_add_i32 s4, s11, s14
v_lshlrev_b32_e32 v10, 2, v1
s_xor_b32 s15, s4, s14
v_dual_mov_b32 v6, s16 :: v_dual_add_nc_u32 v9, s13, v1
v_cvt_f32_u32_e32 v2, s15
v_lshlrev_b32_e32 v11, 6, v0
v_add_nc_u32_e32 v7, 0x400, v10
v_lshlrev_b32_e32 v5, 2, v0
v_cmp_gt_i32_e64 s4, s10, v9
v_rcp_iflag_f32_e32 v8, v2
v_mad_u64_u32 v[2:3], null, v0, s11, v[1:2]
s_and_b32 s10, s3, s2
s_sub_i32 s11, 0, s15
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_3)
v_mad_u64_u32 v[3:4], null, v0, s5, v[1:2]
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v4, 0x4f7ffffe, v8
v_dual_mov_b32 v8, 0 :: v_dual_add_nc_u32 v9, v11, v10
v_add_nc_u32_e32 v10, v7, v11
v_cvt_u32_f32_e32 v11, v4
v_mov_b32_e32 v4, 0
s_branch .LBB0_3
.LBB0_2:
; loop latch: block barrier, reload the LDS loop scalars, advance a/b,
; and branch out once a > end_A.
s_or_b32 exec_lo, exec_lo, s16
s_movk_i32 s16, 0x800
s_delay_alu instid0(SALU_CYCLE_1)
v_add_nc_u32_e32 v12, s16, v8
s_barrier
buffer_gl0_inv
ds_load_2addr_b32 v[12:13], v12 offset1:1
ds_load_b32 v14, v8 offset:2056
s_waitcnt lgkmcnt(1)
v_readfirstlane_b32 s16, v13
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v6, v14, v6
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s13, s16, s13
v_cmp_gt_i32_e32 vcc_lo, s13, v12
s_cbranch_vccnz .LBB0_14
.LBB0_3:
; loop head: inline expansion of the signed divide a/ref_pitch (reciprocal
; estimate + correction steps) for the height guard.
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_readfirstlane_b32 s16, v11
s_ashr_i32 s17, s13, 31
s_add_i32 s19, s13, s17
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_xor_b32 s19, s19, s17
s_mul_i32 s18, s11, s16
s_xor_b32 s17, s17, s14
s_mul_hi_u32 s18, s16, s18
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s16, s16, s18
s_mul_hi_u32 s16, s19, s16
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_i32 s18, s16, s15
s_sub_i32 s18, s19, s18
s_add_i32 s19, s16, 1
s_sub_i32 s20, s18, s15
s_cmp_ge_u32 s18, s15
s_cselect_b32 s16, s19, s16
s_cselect_b32 s18, s20, s18
s_add_i32 s19, s16, 1
s_cmp_ge_u32 s18, s15
s_cselect_b32 s16, s19, s16
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_xor_b32 s16, s16, s17
s_sub_i32 s16, s16, s17
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v12, s16, v0
s_mov_b32 s16, exec_lo
v_cmpx_le_i32_e64 s12, v12
s_xor_b32 s16, exec_lo, s16
s_cbranch_execz .LBB0_5
; else-path: out-of-height lanes zero their tile slots.
ds_store_b32 v9, v8
ds_store_b32 v10, v8
.LBB0_5:
; then-path: predicated global loads of ref/query into the LDS tiles.
s_and_not1_saveexec_b32 s16, s16
s_cbranch_execz .LBB0_11
v_dual_mov_b32 v12, 0 :: v_dual_mov_b32 v13, 0
s_and_saveexec_b32 s17, s4
s_cbranch_execz .LBB0_8
v_add_nc_u32_e32 v13, s13, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v14, 31, v13
v_lshlrev_b64 v[13:14], 2, v[13:14]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v13, vcc_lo, s6, v13
v_add_co_ci_u32_e32 v14, vcc_lo, s7, v14, vcc_lo
global_load_b32 v13, v[13:14], off
.LBB0_8:
s_or_b32 exec_lo, exec_lo, s17
s_waitcnt vmcnt(0)
ds_store_b32 v9, v13
s_and_saveexec_b32 s17, s2
s_cbranch_execz .LBB0_10
v_add_nc_u32_e32 v12, v3, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v13, 31, v12
v_lshlrev_b64 v[12:13], 2, v[12:13]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v12, vcc_lo, s8, v12
v_add_co_ci_u32_e32 v13, vcc_lo, s9, v13, vcc_lo
global_load_b32 v12, v[12:13], off
.LBB0_10:
s_or_b32 exec_lo, exec_lo, s17
s_waitcnt vmcnt(0)
ds_store_b32 v10, v12
.LBB0_11:
; first __syncthreads() of the iteration, then the guarded compute.
s_or_b32 exec_lo, exec_lo, s16
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_and_saveexec_b32 s16, s10
s_cbranch_execz .LBB0_2
s_mov_b32 s17, 0
.LBB0_13:
; inner reduction over the 0x400-byte tiles: load pair from LDS,
; subtract, fused multiply-accumulate of the square into v4 (ssd).
s_delay_alu instid0(SALU_CYCLE_1)
v_add_nc_u32_e32 v12, s17, v5
v_add_nc_u32_e32 v13, s17, v7
s_add_i32 s17, s17, 64
ds_load_b32 v12, v12
ds_load_b32 v13, v13
s_cmpk_lg_i32 s17, 0x400
s_waitcnt lgkmcnt(0)
v_sub_f32_e32 v12, v12, v13
s_delay_alu instid0(VALU_DEP_1)
v_fmac_f32_e32 v4, v12, v12
s_cbranch_scc1 .LBB0_13
s_branch .LBB0_2
.LBB0_14:
; epilogue: conditional store of the accumulated ssd to dist.
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s3, s2
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_16
v_mov_b32_e32 v2, 0
s_movk_i32 s2, 0x800
s_load_b64 s[0:1], s[0:1], 0x28
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v2, s2, v2
ds_load_2addr_b32 v[2:3], v2 offset0:3 offset1:4
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v0, v2, v0
v_mul_lo_u32 v0, v0, s5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add3_u32 v0, v3, v1, v0
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v4, off
.LBB0_16:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z17compute_distancesPfiiS_iiiS_
.amdhsa_group_segment_fixed_size 2068
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 48
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 15
.amdhsa_next_free_sgpr 21
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z17compute_distancesPfiiS_iiiS_, .Lfunc_end0-_Z17compute_distancesPfiiS_iiiS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 2068
.kernarg_segment_align: 8
.kernarg_segment_size: 48
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z17compute_distancesPfiiS_iiiS_
.private_segment_fixed_size: 0
.sgpr_count: 23
.sgpr_spill_count: 0
.symbol: _Z17compute_distancesPfiiS_iiiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 15
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
// Computes the squared Euclidean distance between every reference point and
// every query point, producing one BLOCK_DIM x BLOCK_DIM output tile per
// thread block.
//
// Layout (as established by the indexing below): points are stored
// column-wise — element [d * pitch + p] is coordinate d of point p — so
// `height` is the point dimensionality and `ref_width`/`query_width` are the
// point counts. `ref_pitch`/`query_pitch` are row strides in elements (not
// bytes). The result dist[(ref point) * query_pitch + (query point)] is the
// squared distance; no sqrt is applied here. dist is indexed with query's
// pitch, so it is assumed to share that allocation pitch.
//
// Launch contract: assumes blockDim == (BLOCK_DIM, BLOCK_DIM), with
// blockIdx.y tiling the reference points and blockIdx.x the query points
// — TODO confirm at the call site.
__global__ void compute_distances(float * ref, int ref_width, int ref_pitch, float * query, int query_width, int query_pitch, int height, float * dist) {
// Shared-memory (LDS) tiles holding the current BLOCK_DIM x BLOCK_DIM slab of each input.
// NOTE(review): shared_A is read column-wise below (shared_A[k][ty]); padding the
// inner dimension to BLOCK_DIM+1 would avoid shared-memory bank conflicts — a
// performance-only change, deliberately not made here.
__shared__ float shared_A[BLOCK_DIM][BLOCK_DIM];
__shared__ float shared_B[BLOCK_DIM][BLOCK_DIM];
// Tile-walk bounds for A (begin, step, end) and B (begin, step). Every thread
// of the block stores the same value into each scalar before the first
// barrier, so the concurrent writes are benign; per-thread locals would work
// equally well.
__shared__ int begin_A;
__shared__ int begin_B;
__shared__ int step_A;
__shared__ int step_B;
__shared__ int end_A;
// Thread index within the block
int tx = threadIdx.x;
int ty = threadIdx.y;
// Running sum of squared differences for this (ref, query) pair
float ssd = 0.f;
// First column of this block's ref/query tiles, and the per-slab strides
// (one slab = BLOCK_DIM rows, i.e. BLOCK_DIM point dimensions).
begin_A = BLOCK_DIM * blockIdx.y;
begin_B = BLOCK_DIM * blockIdx.x;
step_A = BLOCK_DIM * ref_pitch;
step_B = BLOCK_DIM * query_pitch;
end_A = begin_A + (height-1) * ref_pitch;
// Bounds guards for the ragged final tiles:
int cond0 = (begin_A + tx < ref_width); // this thread's ref column exists (guards the shared_A load)
int cond1 = (begin_B + tx < query_width); // this thread's query column exists (guards the shared_B load, the compute, and the output store)
int cond2 = (begin_A + ty < ref_width); // this thread's ref row of the output tile exists (guards the compute and the output store)
// Walk both inputs one BLOCK_DIM-row slab at a time.
for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) {
// Stage one slab of each input into shared memory. Lanes past the last
// point or past the last dimension store 0; those zeros only feed
// (ref, query) pairs whose result is never written.
if (a/ref_pitch + ty < height) {
shared_A[ty][tx] = (cond0)? ref[a + ref_pitch * ty + tx] : 0;
shared_B[ty][tx] = (cond1)? query[b + query_pitch * ty + tx] : 0;
}
else {
shared_A[ty][tx] = 0;
shared_B[ty][tx] = 0;
}
// All loads must land before any thread reads the tiles.
__syncthreads();
// Accumulate this slab's contribution: for each of the BLOCK_DIM
// dimensions k, compare ref column ty against query column tx.
if (cond2 && cond1) {
for (int k = 0; k < BLOCK_DIM; ++k){
float tmp = shared_A[k][ty] - shared_B[k][tx];
ssd += tmp*tmp;
}
}
// Outside the divergent branch above, so every thread reaches it; keeps
// the next iteration from overwriting tiles that are still being read.
__syncthreads();
}
// One store per valid (ref, query) pair; dist uses query_pitch as row stride.
if (cond2 && cond1) {
dist[ (begin_A + ty) * query_pitch + begin_B + tx ] = ssd;
}
}
.file "compute_distances.hip"
# Machine-generated x86-64 host launch stub for compute_distances (do not
# hand-edit; regenerate with hipcc). Spills the six register arguments to the
# stack, builds the kernel-argument pointer array expected by the HIP runtime,
# pops the launch configuration pushed by the <<< >>> operator, and forwards
# everything to hipLaunchKernel.
.globl _Z32__device_stub__compute_distancesPfiiS_iiiS_ # -- Begin function _Z32__device_stub__compute_distancesPfiiS_iiiS_
.p2align 4, 0x90
.type _Z32__device_stub__compute_distancesPfiiS_iiiS_,@function
_Z32__device_stub__compute_distancesPfiiS_iiiS_: # @_Z32__device_stub__compute_distancesPfiiS_iiiS_
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
# Spill the six register arguments (ref, ref_width, ref_pitch, query,
# query_width, query_pitch) to local slots.
movq %rdi, 72(%rsp)
movl %esi, 12(%rsp)
movl %edx, 8(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 4(%rsp)
movl %r9d, (%rsp)
# Build the args[] pointer array (one pointer per kernel parameter; the
# last two entries point at the stack-passed height and dist).
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 12(%rsp), %rax
movq %rax, 88(%rsp)
leaq 8(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
leaq 160(%rsp), %rax
movq %rax, 128(%rsp)
leaq 168(%rsp), %rax
movq %rax, 136(%rsp)
# Retrieve grid/block dims, shared-mem size and stream pushed by the
# kernel-launch operator.
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z17compute_distancesPfiiS_iiiS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z32__device_stub__compute_distancesPfiiS_iiiS_, .Lfunc_end0-_Z32__device_stub__compute_distancesPfiiS_iiiS_
.cfi_endproc
# -- End function
# Machine-generated module constructor (runs from .init_array). Registers the
# embedded fat binary exactly once (guarded by __hip_gpubin_handle), registers
# the compute_distances kernel symbol with the HIP runtime, and installs
# __hip_module_dtor via atexit (tail call).
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z17compute_distancesPfiiS_iiiS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
# Machine-generated module destructor (installed via atexit by the ctor).
# Unregisters the fat binary if it was registered and clears the handle so the
# teardown is idempotent.
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z17compute_distancesPfiiS_iiiS_,@object # @_Z17compute_distancesPfiiS_iiiS_
.section .rodata,"a",@progbits
.globl _Z17compute_distancesPfiiS_iiiS_
.p2align 3, 0x0
_Z17compute_distancesPfiiS_iiiS_:
.quad _Z32__device_stub__compute_distancesPfiiS_iiiS_
.size _Z17compute_distancesPfiiS_iiiS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z17compute_distancesPfiiS_iiiS_"
.size .L__unnamed_1, 33
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z32__device_stub__compute_distancesPfiiS_iiiS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z17compute_distancesPfiiS_iiiS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z17compute_distancesPfiiS_iiiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.Y ; /* 0x0000000000047919 */
/* 0x000e220000002600 */
/*0020*/ ULDC UR4, c[0x0][0x180] ; /* 0x0000600000047ab9 */
/* 0x000fe20000000800 */
/*0030*/ MOV R6, c[0x0][0x16c] ; /* 0x00005b0000067a02 */
/* 0x000fe20000000f00 */
/*0040*/ UIADD3 UR4, UR4, -0x1, URZ ; /* 0xffffffff04047890 */
/* 0x000fe2000fffe03f */
/*0050*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */
/* 0x000e620000002500 */
/*0060*/ ULDC UR5, c[0x0][0x16c] ; /* 0x00005b0000057ab9 */
/* 0x000fe20000000800 */
/*0070*/ MOV R7, c[0x0][0x17c] ; /* 0x00005f0000077a02 */
/* 0x000fe20000000f00 */
/*0080*/ UIMAD UR4, UR4, UR5, URZ ; /* 0x00000005040472a4 */
/* 0x000fe2000f8e023f */
/*0090*/ S2R R3, SR_TID.Y ; /* 0x0000000000037919 */
/* 0x000ea20000002200 */
/*00a0*/ IMAD.SHL.U32 R6, R6, 0x10, RZ ; /* 0x0000001006067824 */
/* 0x000fe200078e00ff */
/*00b0*/ SHF.L.U32 R7, R7, 0x4, RZ ; /* 0x0000000407077819 */
/* 0x000fe200000006ff */
/*00c0*/ HFMA2.MMA R15, -RZ, RZ, 0, 0 ; /* 0x00000000ff0f7435 */
/* 0x000fe200000001ff */
/*00d0*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000ee20000002100 */
/*00e0*/ ISETP.LE.AND P1, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fc4000bf23270 */
/*00f0*/ SHF.L.U32 R4, R4, 0x4, RZ ; /* 0x0000000404047819 */
/* 0x001fe400000006ff */
/*0100*/ SHF.L.U32 R5, R5, 0x4, RZ ; /* 0x0000000405057819 */
/* 0x002fe400000006ff */
/*0110*/ IADD3 R8, R4.reuse, UR4, RZ ; /* 0x0000000404087c10 */
/* 0x040fe2000fffe0ff */
/*0120*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0130*/ IADD3 R2, R4, R3, RZ ; /* 0x0000000304027210 */
/* 0x004fe20007ffe0ff */
/*0140*/ STS.128 [0x800], R4 ; /* 0x00080004ff007388 */
/* 0x0001e60000000c00 */
/*0150*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x168], PT ; /* 0x00005a0002007a0c */
/* 0x000fe20003f06270 */
/*0160*/ STS [0x810], R8 ; /* 0x00081008ff007388 */
/* 0x0001e20000000800 */
/*0170*/ IADD3 R2, R5, R0, RZ ; /* 0x0000000005027210 */
/* 0x008fc80007ffe0ff */
/*0180*/ ISETP.LT.AND P0, PT, R2, c[0x0][0x178], !P0 ; /* 0x00005e0002007a0c */
/* 0x000fe20004701270 */
/*0190*/ @!P1 BRA 0x990 ; /* 0x000007f000009947 */
/* 0x000fd80003800000 */
/*01a0*/ IABS R6, c[0x0][0x16c] ; /* 0x00005b0000067a13 */
/* 0x001fe20000000000 */
/*01b0*/ IMAD.MOV.U32 R15, RZ, RZ, RZ ; /* 0x000000ffff0f7224 */
/* 0x000fe200078e00ff */
/*01c0*/ IADD3 R8, R4, R0, RZ ; /* 0x0000000004087210 */
/* 0x000fe40007ffe0ff */
/*01d0*/ I2F.RP R10, R6 ; /* 0x00000006000a7306 */
/* 0x000e220000209400 */
/*01e0*/ SHF.L.U32 R12, R0, 0x2, RZ ; /* 0x00000002000c7819 */
/* 0x000fe400000006ff */
/*01f0*/ ISETP.GE.AND P1, PT, R8, c[0x0][0x168], PT ; /* 0x00005a0008007a0c */
/* 0x000fe40003f26270 */
/*0200*/ MOV R8, RZ ; /* 0x000000ff00087202 */
/* 0x000fc40000000f00 */
/*0210*/ MOV R7, R5 ; /* 0x0000000500077202 */
/* 0x000fe40000000f00 */
/*0220*/ LEA R5, R3.reuse, R12, 0x6 ; /* 0x0000000c03057211 */
/* 0x040fe200078e30ff */
/*0230*/ MUFU.RCP R10, R10 ; /* 0x0000000a000a7308 */
/* 0x001e240000001000 */
/*0240*/ IADD3 R9, R10, 0xffffffe, RZ ; /* 0x0ffffffe0a097810 */
/* 0x001fe20007ffe0ff */
/*0250*/ IMAD R10, R3, c[0x0][0x17c], R0 ; /* 0x00005f00030a7a24 */
/* 0x000fca00078e0200 */
/*0260*/ F2I.FTZ.U32.TRUNC.NTZ R9, R9 ; /* 0x0000000900097305 */
/* 0x000e24000021f000 */
/*0270*/ IMAD.MOV R11, RZ, RZ, -R9 ; /* 0x000000ffff0b7224 */
/* 0x001fc800078e0a09 */
/*0280*/ IMAD R11, R11, R6, RZ ; /* 0x000000060b0b7224 */
/* 0x000fc800078e02ff */
/*0290*/ IMAD.HI.U32 R8, R9, R11, R8 ; /* 0x0000000b09087227 */
/* 0x000fc800078e0008 */
/*02a0*/ IMAD R9, R3, c[0x0][0x16c], R0 ; /* 0x00005b0003097a24 */
/* 0x000fe400078e0200 */
/*02b0*/ IABS R13, R4 ; /* 0x00000004000d7213 */
/* 0x000fe20000000000 */
/*02c0*/ BSSY B0, 0x4d0 ; /* 0x0000020000007945 */
/* 0x000fe20003800000 */
/*02d0*/ IABS R14, c[0x0][0x16c] ; /* 0x00005b00000e7a13 */
/* 0x002fc60000000000 */
/*02e0*/ IMAD.HI.U32 R11, R8, R13, RZ ; /* 0x0000000d080b7227 */
/* 0x000fca00078e00ff */
/*02f0*/ IADD3 R12, -R11, RZ, RZ ; /* 0x000000ff0b0c7210 */
/* 0x000fca0007ffe1ff */
/*0300*/ IMAD R13, R14, R12, R13 ; /* 0x0000000c0e0d7224 */
/* 0x000fe200078e020d */
/*0310*/ LOP3.LUT R12, R4, c[0x0][0x16c], RZ, 0x3c, !PT ; /* 0x00005b00040c7a12 */
/* 0x000fc800078e3cff */
/*0320*/ ISETP.GT.U32.AND P4, PT, R6, R13, PT ; /* 0x0000000d0600720c */
/* 0x000fe40003f84070 */
/*0330*/ ISETP.GE.AND P3, PT, R12, RZ, PT ; /* 0x000000ff0c00720c */
/* 0x000fd60003f66270 */
/*0340*/ @!P4 IADD3 R13, R13, -R14, RZ ; /* 0x8000000e0d0dc210 */
/* 0x000fe40007ffe0ff */
/*0350*/ @!P4 IADD3 R11, R11, 0x1, RZ ; /* 0x000000010b0bc810 */
/* 0x000fe40007ffe0ff */
/*0360*/ ISETP.GE.U32.AND P2, PT, R13, R6, PT ; /* 0x000000060d00720c */
/* 0x000fe40003f46070 */
/*0370*/ ISETP.NE.AND P4, PT, RZ, c[0x0][0x16c], PT ; /* 0x00005b00ff007a0c */
/* 0x000fd60003f85270 */
/*0380*/ @P2 IADD3 R11, R11, 0x1, RZ ; /* 0x000000010b0b2810 */
/* 0x000fc80007ffe0ff */
/*0390*/ @!P3 IADD3 R11, -R11, RZ, RZ ; /* 0x000000ff0b0bb210 */
/* 0x000fe40007ffe1ff */
/*03a0*/ @!P4 LOP3.LUT R11, RZ, c[0x0][0x16c], RZ, 0x33, !PT ; /* 0x00005b00ff0bca12 */
/* 0x000fc800078e33ff */
/*03b0*/ IADD3 R11, R11, R3, RZ ; /* 0x000000030b0b7210 */
/* 0x000fc80007ffe0ff */
/*03c0*/ ISETP.GE.AND P2, PT, R11, c[0x0][0x180], PT ; /* 0x000060000b007a0c */
/* 0x000fda0003f46270 */
/*03d0*/ @P2 STS [R5], RZ ; /* 0x000000ff05002388 */
/* 0x0011e20000000800 */
/*03e0*/ @P2 IMAD.MOV.U32 R18, RZ, RZ, RZ ; /* 0x000000ffff122224 */
/* 0x000fe200078e00ff */
/*03f0*/ @P2 BRA 0x4c0 ; /* 0x000000c000002947 */
/* 0x000fea0003800000 */
/*0400*/ @!P1 IADD3 R12, R9, R4, RZ ; /* 0x00000004090c9210 */
/* 0x000fe20007ffe0ff */
/*0410*/ HFMA2.MMA R14, -RZ, RZ, 0, 0 ; /* 0x00000000ff0e7435 */
/* 0x000fe200000001ff */
/*0420*/ @!P1 MOV R13, 0x4 ; /* 0x00000004000d9802 */
/* 0x000fe40000000f00 */
/*0430*/ ISETP.GE.AND P2, PT, R2, c[0x0][0x178], PT ; /* 0x00005e0002007a0c */
/* 0x000fc60003f46270 */
/*0440*/ @!P1 IMAD.WIDE R12, R12, R13, c[0x0][0x160] ; /* 0x000058000c0c9625 */
/* 0x000fca00078e020d */
/*0450*/ @!P1 LDG.E R14, [R12.64] ; /* 0x000000040c0e9981 */
/* 0x000ea2000c1e1900 */
/*0460*/ IMAD.MOV.U32 R18, RZ, RZ, RZ ; /* 0x000000ffff127224 */
/* 0x000fc800078e00ff */
/*0470*/ @!P2 IADD3 R16, R10, R7, RZ ; /* 0x000000070a10a210 */
/* 0x000fe40007ffe0ff */
/*0480*/ @!P2 MOV R17, 0x4 ; /* 0x000000040011a802 */
/* 0x000fca0000000f00 */
/*0490*/ @!P2 IMAD.WIDE R16, R16, R17, c[0x0][0x170] ; /* 0x00005c001010a625 */
/* 0x000fca00078e0211 */
/*04a0*/ @!P2 LDG.E R18, [R16.64] ; /* 0x000000041012a981 */
/* 0x000368000c1e1900 */
/*04b0*/ STS [R5], R14 ; /* 0x0000000e05007388 */
/* 0x0043e40000000800 */
/*04c0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*04d0*/ STS [R5+0x400], R18 ; /* 0x0004001205007388 */
/* 0x0205e20000000800 */
/*04e0*/ BSSY B0, 0x920 ; /* 0x0000043000007945 */
/* 0x000fe60003800000 */
/*04f0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0500*/ @!P0 BRA 0x910 ; /* 0x0000040000008947 */
/* 0x000fea0003800000 */
/*0510*/ LDS R13, [R3.X4] ; /* 0x00000000030d7984 */
/* 0x004fe80000004800 */
/*0520*/ LDS R14, [R0.X4+0x400] ; /* 0x00040000000e7984 */
/* 0x002e680000004800 */
/*0530*/ LDS R26, [R0.X4+0x440] ; /* 0x00044000001a7984 */
/* 0x000fe80000004800 */
/*0540*/ LDS R27, [R3.X4+0x40] ; /* 0x00004000031b7984 */
/* 0x000ea80000004800 */
/*0550*/ LDS R17, [R0.X4+0x480] ; /* 0x0004800000117984 */
/* 0x000fe80000004800 */
/*0560*/ LDS R18, [R3.X4+0x80] ; /* 0x0000800003127984 */
/* 0x000ee80000004800 */
/*0570*/ LDS R23, [R0.X4+0x4c0] ; /* 0x0004c00000177984 */
/* 0x000fe80000004800 */
/*0580*/ LDS R24, [R3.X4+0xc0] ; /* 0x0000c00003187984 */
/* 0x000f280000004800 */
/*0590*/ LDS R19, [R0.X4+0x500] ; /* 0x0005000000137984 */
/* 0x000fe80000004800 */
/*05a0*/ LDS R20, [R3.X4+0x100] ; /* 0x0001000003147984 */
/* 0x000f680000004800 */
/*05b0*/ LDS R21, [R0.X4+0x540] ; /* 0x0005400000157984 */
/* 0x000fe80000004800 */
/*05c0*/ LDS R22, [R3.X4+0x140] ; /* 0x0001400003167984 */
/* 0x000e220000004800 */
/*05d0*/ FADD R16, R13, -R14 ; /* 0x8000000e0d107221 */
/* 0x002fc60000000000 */
/*05e0*/ LDS R11, [R0.X4+0x580] ; /* 0x00058000000b7984 */
/* 0x000fe20000004800 */
/*05f0*/ FFMA R25, R16, R16, R15 ; /* 0x0000001010197223 */
/* 0x000fc6000000000f */
/*0600*/ LDS R12, [R3.X4+0x180] ; /* 0x00018000030c7984 */
/* 0x000e620000004800 */
/*0610*/ FADD R26, -R26, R27 ; /* 0x0000001b1a1a7221 */
/* 0x004fc60000000100 */
/*0620*/ LDS R13, [R0.X4+0x5c0] ; /* 0x0005c000000d7984 */
/* 0x000fe20000004800 */
/*0630*/ FFMA R25, R26, R26, R25 ; /* 0x0000001a1a197223 */
/* 0x000fc60000000019 */
/*0640*/ LDS R14, [R3.X4+0x1c0] ; /* 0x0001c000030e7984 */
/* 0x000ea20000004800 */
/*0650*/ FADD R18, -R17, R18 ; /* 0x0000001211127221 */
/* 0x008fc60000000100 */
/*0660*/ LDS R15, [R0.X4+0x600] ; /* 0x00060000000f7984 */
/* 0x000fe20000004800 */
/*0670*/ FFMA R25, R18, R18, R25 ; /* 0x0000001212197223 */
/* 0x000fc60000000019 */
/*0680*/ LDS R16, [R3.X4+0x200] ; /* 0x0002000003107984 */
/* 0x000ee20000004800 */
/*0690*/ FADD R24, -R23, R24 ; /* 0x0000001817187221 */
/* 0x010fc60000000100 */
/*06a0*/ LDS R18, [R0.X4+0x640] ; /* 0x0006400000127984 */
/* 0x000fe20000004800 */
/*06b0*/ FFMA R25, R24, R24, R25 ; /* 0x0000001818197223 */
/* 0x000fe40000000019 */
/*06c0*/ FADD R24, -R19, R20 ; /* 0x0000001413187221 */
/* 0x020fe20000000100 */
/*06d0*/ LDS R17, [R3.X4+0x240] ; /* 0x0002400003117984 */
/* 0x000f260000004800 */
/*06e0*/ FFMA R25, R24, R24, R25 ; /* 0x0000001818197223 */
/* 0x000fe20000000019 */
/*06f0*/ LDS R20, [R0.X4+0x680] ; /* 0x0006800000147984 */
/* 0x000fe20000004800 */
/*0700*/ FADD R24, -R21, R22 ; /* 0x0000001615187221 */
/* 0x001fc60000000100 */
/*0710*/ LDS R19, [R3.X4+0x280] ; /* 0x0002800003137984 */
/* 0x000e220000004800 */
/*0720*/ FFMA R25, R24, R24, R25 ; /* 0x0000001818197223 */
/* 0x000fc60000000019 */
/*0730*/ LDS R22, [R0.X4+0x6c0] ; /* 0x0006c00000167984 */
/* 0x000fe20000004800 */
/*0740*/ FADD R24, -R11, R12 ; /* 0x0000000c0b187221 */
/* 0x002fc60000000100 */
/*0750*/ LDS R21, [R3.X4+0x2c0] ; /* 0x0002c00003157984 */
/* 0x000e620000004800 */
/*0760*/ FFMA R25, R24, R24, R25 ; /* 0x0000001818197223 */
/* 0x000fc60000000019 */
/*0770*/ LDS R11, [R0.X4+0x700] ; /* 0x00070000000b7984 */
/* 0x000fe20000004800 */
/*0780*/ FADD R24, -R13, R14 ; /* 0x0000000e0d187221 */
/* 0x004fc60000000100 */
/*0790*/ LDS R12, [R3.X4+0x300] ; /* 0x00030000030c7984 */
/* 0x000ea20000004800 */
/*07a0*/ FFMA R25, R24, R24, R25 ; /* 0x0000001818197223 */
/* 0x000fc60000000019 */
/*07b0*/ LDS R14, [R0.X4+0x740] ; /* 0x00074000000e7984 */
/* 0x000fe20000004800 */
/*07c0*/ FADD R26, -R15, R16 ; /* 0x000000100f1a7221 */
/* 0x008fc60000000100 */
/*07d0*/ LDS R13, [R3.X4+0x340] ; /* 0x00034000030d7984 */
/* 0x000ee20000004800 */
/*07e0*/ FFMA R25, R26, R26, R25 ; /* 0x0000001a1a197223 */
/* 0x000fc60000000019 */
/*07f0*/ LDS R24, [R0.X4+0x780] ; /* 0x0007800000187984 */
/* 0x000fe80000004800 */
/*0800*/ LDS R23, [R3.X4+0x380] ; /* 0x0003800003177984 */
/* 0x000f620000004800 */
/*0810*/ FADD R18, -R18, R17 ; /* 0x0000001112127221 */
/* 0x010fc60000000100 */
/*0820*/ LDS R16, [R0.X4+0x7c0] ; /* 0x0007c00000107984 */
/* 0x000fe20000004800 */
/*0830*/ FFMA R25, R18, R18, R25 ; /* 0x0000001212197223 */
/* 0x000fc60000000019 */
/*0840*/ LDS R15, [R3.X4+0x3c0] ; /* 0x0003c000030f7984 */
/* 0x000f220000004800 */
/*0850*/ FADD R20, -R20, R19 ; /* 0x0000001314147221 */
/* 0x001fc80000000100 */
/*0860*/ FFMA R25, R20, R20, R25 ; /* 0x0000001414197223 */
/* 0x000fe40000000019 */
/*0870*/ FADD R22, -R22, R21 ; /* 0x0000001516167221 */
/* 0x002fc80000000100 */
/*0880*/ FFMA R25, R22, R22, R25 ; /* 0x0000001616197223 */
/* 0x000fe40000000019 */
/*0890*/ FADD R12, -R11, R12 ; /* 0x0000000c0b0c7221 */
/* 0x004fc80000000100 */
/*08a0*/ FFMA R25, R12, R12, R25 ; /* 0x0000000c0c197223 */
/* 0x000fe40000000019 */
/*08b0*/ FADD R14, -R14, R13 ; /* 0x0000000d0e0e7221 */
/* 0x008fc80000000100 */
/*08c0*/ FFMA R25, R14, R14, R25 ; /* 0x0000000e0e197223 */
/* 0x000fe40000000019 */
/*08d0*/ FADD R24, -R24, R23 ; /* 0x0000001718187221 */
/* 0x020fc80000000100 */
/*08e0*/ FFMA R25, R24, R24, R25 ; /* 0x0000001818197223 */
/* 0x000fe40000000019 */
/*08f0*/ FADD R16, -R16, R15 ; /* 0x0000000f10107221 */
/* 0x010fc80000000100 */
/*0900*/ FFMA R15, R16, R16, R25 ; /* 0x00000010100f7223 */
/* 0x000fe40000000019 */
/*0910*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x004fea0003800000 */
/*0920*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0930*/ LDS.64 R12, [0x808] ; /* 0x00080800ff0c7984 */
/* 0x000ea80000000a00 */
/*0940*/ LDS R11, [0x810] ; /* 0x00081000ff0b7984 */
/* 0x000ee20000000800 */
/*0950*/ IADD3 R4, R12, R4, RZ ; /* 0x000000040c047210 */
/* 0x004fc40007ffe0ff */
/*0960*/ IADD3 R7, R7, R13, RZ ; /* 0x0000000d07077210 */
/* 0x000fe40007ffe0ff */
/*0970*/ ISETP.GT.AND P2, PT, R4, R11, PT ; /* 0x0000000b0400720c */
/* 0x008fda0003f44270 */
/*0980*/ @!P2 BRA 0x2b0 ; /* 0xfffff9200000a947 */
/* 0x000fea000383ffff */
/*0990*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*09a0*/ LDS.64 R4, [0x800] ; /* 0x00080000ff047984 */
/* 0x003e220000000a00 */
/*09b0*/ HFMA2.MMA R2, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff027435 */
/* 0x000fe200000001ff */
/*09c0*/ IADD3 R3, R3, R4, RZ ; /* 0x0000000403037210 */
/* 0x001fe40007ffe0ff */
/*09d0*/ IADD3 R0, R0, R5, RZ ; /* 0x0000000500007210 */
/* 0x000fca0007ffe0ff */
/*09e0*/ IMAD R3, R3, c[0x0][0x17c], R0 ; /* 0x00005f0003037a24 */
/* 0x000fc800078e0200 */
/*09f0*/ IMAD.WIDE R2, R3, R2, c[0x0][0x188] ; /* 0x0000620003027625 */
/* 0x000fca00078e0202 */
/*0a00*/ STG.E [R2.64], R15 ; /* 0x0000000f02007986 */
/* 0x000fe2000c101904 */
/*0a10*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0a20*/ BRA 0xa20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0a30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0aa0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ab0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ac0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ad0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ae0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0af0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z17compute_distancesPfiiS_iiiS_
.globl _Z17compute_distancesPfiiS_iiiS_
.p2align 8
.type _Z17compute_distancesPfiiS_iiiS_,@function
_Z17compute_distancesPfiiS_iiiS_:
s_clause 0x2
s_load_b64 s[10:11], s[0:1], 0x8
s_load_b32 s12, s[0:1], 0x20
s_load_b64 s[4:5], s[0:1], 0x18
v_dual_mov_b32 v4, 0 :: v_dual_and_b32 v1, 0x3ff, v0
s_movk_i32 s2, 0x800
s_movk_i32 s3, 0x800
v_bfe_u32 v0, v0, 10, 10
s_lshl_b32 s13, s15, 4
s_lshl_b32 s16, s14, 4
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
v_dual_mov_b32 v2, s13 :: v_dual_mov_b32 v3, s16
v_add_nc_u32_e32 v7, s16, v1
ds_store_b32 v4, v3 offset:2064
s_waitcnt lgkmcnt(0)
s_lshl_b32 s6, s5, 4
v_dual_mov_b32 v10, s6 :: v_dual_add_nc_u32 v5, s2, v4
s_lshl_b32 s2, s11, 4
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
v_dual_mov_b32 v9, s2 :: v_dual_add_nc_u32 v6, s3, v4
s_add_i32 s3, s12, -1
v_cmp_gt_i32_e64 s2, s4, v7
s_mul_i32 s3, s3, s11
s_add_i32 s7, s3, s13
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v11, s7 :: v_dual_add_nc_u32 v8, s13, v0
s_cmp_gt_i32 s13, s7
ds_store_2addr_b32 v5, v10, v2 offset0:2 offset1:3
ds_store_2addr_b32 v6, v11, v9 offset1:1
v_cmp_gt_i32_e64 s3, s10, v8
s_cbranch_scc1 .LBB0_14
s_ashr_i32 s14, s11, 31
s_clause 0x1
s_load_b64 s[6:7], s[0:1], 0x0
s_load_b64 s[8:9], s[0:1], 0x10
s_add_i32 s4, s11, s14
v_lshlrev_b32_e32 v10, 2, v1
s_xor_b32 s15, s4, s14
v_dual_mov_b32 v6, s16 :: v_dual_add_nc_u32 v9, s13, v1
v_cvt_f32_u32_e32 v2, s15
v_lshlrev_b32_e32 v11, 6, v0
v_add_nc_u32_e32 v7, 0x400, v10
v_lshlrev_b32_e32 v5, 2, v0
v_cmp_gt_i32_e64 s4, s10, v9
v_rcp_iflag_f32_e32 v8, v2
v_mad_u64_u32 v[2:3], null, v0, s11, v[1:2]
s_and_b32 s10, s3, s2
s_sub_i32 s11, 0, s15
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_3)
v_mad_u64_u32 v[3:4], null, v0, s5, v[1:2]
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v4, 0x4f7ffffe, v8
v_dual_mov_b32 v8, 0 :: v_dual_add_nc_u32 v9, v11, v10
v_add_nc_u32_e32 v10, v7, v11
v_cvt_u32_f32_e32 v11, v4
v_mov_b32_e32 v4, 0
s_branch .LBB0_3
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s16
s_movk_i32 s16, 0x800
s_delay_alu instid0(SALU_CYCLE_1)
v_add_nc_u32_e32 v12, s16, v8
s_barrier
buffer_gl0_inv
ds_load_2addr_b32 v[12:13], v12 offset1:1
ds_load_b32 v14, v8 offset:2056
s_waitcnt lgkmcnt(1)
v_readfirstlane_b32 s16, v13
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v6, v14, v6
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s13, s16, s13
v_cmp_gt_i32_e32 vcc_lo, s13, v12
s_cbranch_vccnz .LBB0_14
.LBB0_3:
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_readfirstlane_b32 s16, v11
s_ashr_i32 s17, s13, 31
s_add_i32 s19, s13, s17
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_xor_b32 s19, s19, s17
s_mul_i32 s18, s11, s16
s_xor_b32 s17, s17, s14
s_mul_hi_u32 s18, s16, s18
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s16, s16, s18
s_mul_hi_u32 s16, s19, s16
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_i32 s18, s16, s15
s_sub_i32 s18, s19, s18
s_add_i32 s19, s16, 1
s_sub_i32 s20, s18, s15
s_cmp_ge_u32 s18, s15
s_cselect_b32 s16, s19, s16
s_cselect_b32 s18, s20, s18
s_add_i32 s19, s16, 1
s_cmp_ge_u32 s18, s15
s_cselect_b32 s16, s19, s16
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_xor_b32 s16, s16, s17
s_sub_i32 s16, s16, s17
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v12, s16, v0
s_mov_b32 s16, exec_lo
v_cmpx_le_i32_e64 s12, v12
s_xor_b32 s16, exec_lo, s16
s_cbranch_execz .LBB0_5
ds_store_b32 v9, v8
ds_store_b32 v10, v8
.LBB0_5:
s_and_not1_saveexec_b32 s16, s16
s_cbranch_execz .LBB0_11
v_dual_mov_b32 v12, 0 :: v_dual_mov_b32 v13, 0
s_and_saveexec_b32 s17, s4
s_cbranch_execz .LBB0_8
v_add_nc_u32_e32 v13, s13, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v14, 31, v13
v_lshlrev_b64 v[13:14], 2, v[13:14]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v13, vcc_lo, s6, v13
v_add_co_ci_u32_e32 v14, vcc_lo, s7, v14, vcc_lo
global_load_b32 v13, v[13:14], off
.LBB0_8:
s_or_b32 exec_lo, exec_lo, s17
s_waitcnt vmcnt(0)
ds_store_b32 v9, v13
s_and_saveexec_b32 s17, s2
s_cbranch_execz .LBB0_10
v_add_nc_u32_e32 v12, v3, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v13, 31, v12
v_lshlrev_b64 v[12:13], 2, v[12:13]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v12, vcc_lo, s8, v12
v_add_co_ci_u32_e32 v13, vcc_lo, s9, v13, vcc_lo
global_load_b32 v12, v[12:13], off
.LBB0_10:
s_or_b32 exec_lo, exec_lo, s17
s_waitcnt vmcnt(0)
ds_store_b32 v10, v12
.LBB0_11:
s_or_b32 exec_lo, exec_lo, s16
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_and_saveexec_b32 s16, s10
s_cbranch_execz .LBB0_2
s_mov_b32 s17, 0
.LBB0_13:
s_delay_alu instid0(SALU_CYCLE_1)
v_add_nc_u32_e32 v12, s17, v5
v_add_nc_u32_e32 v13, s17, v7
s_add_i32 s17, s17, 64
ds_load_b32 v12, v12
ds_load_b32 v13, v13
s_cmpk_lg_i32 s17, 0x400
s_waitcnt lgkmcnt(0)
v_sub_f32_e32 v12, v12, v13
s_delay_alu instid0(VALU_DEP_1)
v_fmac_f32_e32 v4, v12, v12
s_cbranch_scc1 .LBB0_13
s_branch .LBB0_2
.LBB0_14:
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s3, s2
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_16
v_mov_b32_e32 v2, 0
s_movk_i32 s2, 0x800
s_load_b64 s[0:1], s[0:1], 0x28
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v2, s2, v2
ds_load_2addr_b32 v[2:3], v2 offset0:3 offset1:4
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v0, v2, v0
v_mul_lo_u32 v0, v0, s5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add3_u32 v0, v3, v1, v0
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v4, off
.LBB0_16:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z17compute_distancesPfiiS_iiiS_
.amdhsa_group_segment_fixed_size 2068
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 48
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 15
.amdhsa_next_free_sgpr 21
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z17compute_distancesPfiiS_iiiS_, .Lfunc_end0-_Z17compute_distancesPfiiS_iiiS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 2068
.kernarg_segment_align: 8
.kernarg_segment_size: 48
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z17compute_distancesPfiiS_iiiS_
.private_segment_fixed_size: 0
.sgpr_count: 23
.sgpr_spill_count: 0
.symbol: _Z17compute_distancesPfiiS_iiiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 15
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00130023_00000000-6_compute_distances.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z46__device_stub__Z17compute_distancesPfiiS_iiiS_PfiiS_iiiS_
.type _Z46__device_stub__Z17compute_distancesPfiiS_iiiS_PfiiS_iiiS_, @function
_Z46__device_stub__Z17compute_distancesPfiiS_iiiS_PfiiS_iiiS_:
.LFB2051:
.cfi_startproc
endbr64
subq $200, %rsp
.cfi_def_cfa_offset 208
movq %rdi, 40(%rsp)
movl %esi, 36(%rsp)
movl %edx, 32(%rsp)
movq %rcx, 24(%rsp)
movl %r8d, 20(%rsp)
movl %r9d, 16(%rsp)
movq 216(%rsp), %rax
movq %rax, 8(%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 36(%rsp), %rax
movq %rax, 120(%rsp)
leaq 32(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 20(%rsp), %rax
movq %rax, 144(%rsp)
leaq 16(%rsp), %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 8(%rsp), %rax
movq %rax, 168(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z17compute_distancesPfiiS_iiiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z46__device_stub__Z17compute_distancesPfiiS_iiiS_PfiiS_iiiS_, .-_Z46__device_stub__Z17compute_distancesPfiiS_iiiS_PfiiS_iiiS_
.globl _Z17compute_distancesPfiiS_iiiS_
.type _Z17compute_distancesPfiiS_iiiS_, @function
_Z17compute_distancesPfiiS_iiiS_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pushq 24(%rsp)
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z46__device_stub__Z17compute_distancesPfiiS_iiiS_PfiiS_iiiS_
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z17compute_distancesPfiiS_iiiS_, .-_Z17compute_distancesPfiiS_iiiS_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z17compute_distancesPfiiS_iiiS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z17compute_distancesPfiiS_iiiS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "compute_distances.hip"
.globl _Z32__device_stub__compute_distancesPfiiS_iiiS_ # -- Begin function _Z32__device_stub__compute_distancesPfiiS_iiiS_
.p2align 4, 0x90
.type _Z32__device_stub__compute_distancesPfiiS_iiiS_,@function
_Z32__device_stub__compute_distancesPfiiS_iiiS_: # @_Z32__device_stub__compute_distancesPfiiS_iiiS_
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 72(%rsp)
movl %esi, 12(%rsp)
movl %edx, 8(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 4(%rsp)
movl %r9d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 12(%rsp), %rax
movq %rax, 88(%rsp)
leaq 8(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
leaq 160(%rsp), %rax
movq %rax, 128(%rsp)
leaq 168(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z17compute_distancesPfiiS_iiiS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z32__device_stub__compute_distancesPfiiS_iiiS_, .Lfunc_end0-_Z32__device_stub__compute_distancesPfiiS_iiiS_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z17compute_distancesPfiiS_iiiS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z17compute_distancesPfiiS_iiiS_,@object # @_Z17compute_distancesPfiiS_iiiS_
.section .rodata,"a",@progbits
.globl _Z17compute_distancesPfiiS_iiiS_
.p2align 3, 0x0
_Z17compute_distancesPfiiS_iiiS_:
.quad _Z32__device_stub__compute_distancesPfiiS_iiiS_
.size _Z17compute_distancesPfiiS_iiiS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z17compute_distancesPfiiS_iiiS_"
.size .L__unnamed_1, 33
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z32__device_stub__compute_distancesPfiiS_iiiS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z17compute_distancesPfiiS_iiiS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /* Compute the sum of two vectors using CUDA
* Vishwas S
*/
#include <stdio.h>
#include <stdlib.h>
__global__ void add(int *a, int *b, int *c, int n)
{
int id = blockIdx.x*blockDim.x + threadIdx.x;
if(id<n)
c[id] = a[id] + b[id];
}
int main()
{
int N;
int *a, *b, *c, *da, *db, *dc;
scanf("%d",&N);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
a = (int *)calloc(N,sizeof(int));
b = (int *)calloc(N,sizeof(int));
c = (int *)calloc(N,sizeof(int));
for(int i = 0; i < N; i++)
{
a[i] = rand()%48;
b[i] = rand()%50;
}
int size = N*sizeof(int);
cudaMalloc(&da,size);
cudaMalloc(&db,size);
cudaMalloc(&dc,size);
cudaMemcpy(da,a,size,cudaMemcpyHostToDevice);
cudaMemcpy(db,b,size,cudaMemcpyHostToDevice);
cudaEventRecord(start);
add<<<(N+511)/512,512>>>(da,db,dc,N); //block count, threads per block
cudaEventRecord(stop);
cudaMemcpy(c,dc,size,cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
float ms;
cudaEventElapsedTime(&ms,start,stop);
printf("%f\n",ms);
} | code for sm_80
Function : _Z3addPiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*00d0*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */
/* 0x004fca0007ffe0ff */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /* Compute the sum of two vectors using CUDA
* Vishwas S
*/
#include <stdio.h>
#include <stdlib.h>
__global__ void add(int *a, int *b, int *c, int n)
{
int id = blockIdx.x*blockDim.x + threadIdx.x;
if(id<n)
c[id] = a[id] + b[id];
}
int main()
{
int N;
int *a, *b, *c, *da, *db, *dc;
scanf("%d",&N);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
a = (int *)calloc(N,sizeof(int));
b = (int *)calloc(N,sizeof(int));
c = (int *)calloc(N,sizeof(int));
for(int i = 0; i < N; i++)
{
a[i] = rand()%48;
b[i] = rand()%50;
}
int size = N*sizeof(int);
cudaMalloc(&da,size);
cudaMalloc(&db,size);
cudaMalloc(&dc,size);
cudaMemcpy(da,a,size,cudaMemcpyHostToDevice);
cudaMemcpy(db,b,size,cudaMemcpyHostToDevice);
cudaEventRecord(start);
add<<<(N+511)/512,512>>>(da,db,dc,N); //block count, threads per block
cudaEventRecord(stop);
cudaMemcpy(c,dc,size,cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
float ms;
cudaEventElapsedTime(&ms,start,stop);
printf("%f\n",ms);
} | .file "tmpxft_0004a228_00000000-6_13b.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z27__device_stub__Z3addPiS_S_iPiS_S_i
.type _Z27__device_stub__Z3addPiS_S_iPiS_S_i, @function
_Z27__device_stub__Z3addPiS_S_iPiS_S_i:
.LFB2082:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z27__device_stub__Z3addPiS_S_iPiS_S_i, .-_Z27__device_stub__Z3addPiS_S_iPiS_S_i
.globl _Z3addPiS_S_i
.type _Z3addPiS_S_i, @function
_Z3addPiS_S_i:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z3addPiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z3addPiS_S_i, .-_Z3addPiS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%d"
.LC1:
.string "%f\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $80, %rsp
.cfi_def_cfa_offset 128
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
leaq 4(%rsp), %rsi
leaq .LC0(%rip), %rdi
call __isoc23_scanf@PLT
leaq 32(%rsp), %rdi
call cudaEventCreate@PLT
leaq 40(%rsp), %rdi
call cudaEventCreate@PLT
movl 4(%rsp), %r14d
movslq %r14d, %rbx
movl $4, %esi
movq %rbx, %rdi
call calloc@PLT
movq %rax, %r12
movl $4, %esi
movq %rbx, %rdi
call calloc@PLT
movq %rax, %rbp
movl $4, %esi
movq %rbx, %rdi
call calloc@PLT
movq %rax, %r13
testl %r14d, %r14d
jle .L12
movl $0, %ebx
.L13:
call rand@PLT
movslq %eax, %rdx
imulq $715827883, %rdx, %rdx
sarq $35, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,2), %edx
sall $4, %edx
subl %edx, %eax
movl %eax, (%r12,%rbx,4)
call rand@PLT
movslq %eax, %rdx
imulq $1374389535, %rdx, %rdx
sarq $36, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
imull $50, %edx, %edx
subl %edx, %eax
movl %eax, 0(%rbp,%rbx,4)
movl 4(%rsp), %r14d
addq $1, %rbx
cmpl %ebx, %r14d
jg .L13
.L12:
sall $2, %r14d
movslq %r14d, %r14
leaq 8(%rsp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %r14, %rdx
movq %r12, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %r14, %rdx
movq %rbp, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
movl $512, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl 4(%rsp), %edx
leal 1022(%rdx), %eax
addl $511, %edx
cmovns %edx, %eax
sarl $9, %eax
movl %eax, 48(%rsp)
movl $1, 52(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L18
.L14:
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movl $2, %ecx
movq %r14, %rdx
movq 24(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movq 40(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 60(%rsp), %rdi
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 60(%rsp), %xmm0
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L19
movl $0, %eax
addq $80, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L18:
.cfi_restore_state
movl 4(%rsp), %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z27__device_stub__Z3addPiS_S_iPiS_S_i
jmp .L14
.L19:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC2:
.string "_Z3addPiS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addPiS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /* Compute the sum of two vectors using CUDA
* Vishwas S
*/
#include <stdio.h>
#include <stdlib.h>
__global__ void add(int *a, int *b, int *c, int n)
{
int id = blockIdx.x*blockDim.x + threadIdx.x;
if(id<n)
c[id] = a[id] + b[id];
}
int main()
{
int N;
int *a, *b, *c, *da, *db, *dc;
scanf("%d",&N);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
a = (int *)calloc(N,sizeof(int));
b = (int *)calloc(N,sizeof(int));
c = (int *)calloc(N,sizeof(int));
for(int i = 0; i < N; i++)
{
a[i] = rand()%48;
b[i] = rand()%50;
}
int size = N*sizeof(int);
cudaMalloc(&da,size);
cudaMalloc(&db,size);
cudaMalloc(&dc,size);
cudaMemcpy(da,a,size,cudaMemcpyHostToDevice);
cudaMemcpy(db,b,size,cudaMemcpyHostToDevice);
cudaEventRecord(start);
add<<<(N+511)/512,512>>>(da,db,dc,N); //block count, threads per block
cudaEventRecord(stop);
cudaMemcpy(c,dc,size,cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
float ms;
cudaEventElapsedTime(&ms,start,stop);
printf("%f\n",ms);
} | /* Compute the sum of two vectors using CUDA
* Vishwas S
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
__global__ void add(int *a, int *b, int *c, int n)
{
int id = blockIdx.x*blockDim.x + threadIdx.x;
if(id<n)
c[id] = a[id] + b[id];
}
int main()
{
int N;
int *a, *b, *c, *da, *db, *dc;
scanf("%d",&N);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
a = (int *)calloc(N,sizeof(int));
b = (int *)calloc(N,sizeof(int));
c = (int *)calloc(N,sizeof(int));
for(int i = 0; i < N; i++)
{
a[i] = rand()%48;
b[i] = rand()%50;
}
int size = N*sizeof(int);
hipMalloc(&da,size);
hipMalloc(&db,size);
hipMalloc(&dc,size);
hipMemcpy(da,a,size,hipMemcpyHostToDevice);
hipMemcpy(db,b,size,hipMemcpyHostToDevice);
hipEventRecord(start);
add<<<(N+511)/512,512>>>(da,db,dc,N); //block count, threads per block
hipEventRecord(stop);
hipMemcpy(c,dc,size,hipMemcpyDeviceToHost);
hipEventSynchronize(stop);
float ms;
hipEventElapsedTime(&ms,start,stop);
printf("%f\n",ms);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /* Compute the sum of two vectors using CUDA
* Vishwas S
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
__global__ void add(int *a, int *b, int *c, int n)
{
int id = blockIdx.x*blockDim.x + threadIdx.x;
if(id<n)
c[id] = a[id] + b[id];
}
int main()
{
int N;
int *a, *b, *c, *da, *db, *dc;
scanf("%d",&N);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
a = (int *)calloc(N,sizeof(int));
b = (int *)calloc(N,sizeof(int));
c = (int *)calloc(N,sizeof(int));
for(int i = 0; i < N; i++)
{
a[i] = rand()%48;
b[i] = rand()%50;
}
int size = N*sizeof(int);
hipMalloc(&da,size);
hipMalloc(&db,size);
hipMalloc(&dc,size);
hipMemcpy(da,a,size,hipMemcpyHostToDevice);
hipMemcpy(db,b,size,hipMemcpyHostToDevice);
hipEventRecord(start);
add<<<(N+511)/512,512>>>(da,db,dc,N); //block count, threads per block
hipEventRecord(stop);
hipMemcpy(c,dc,size,hipMemcpyDeviceToHost);
hipEventSynchronize(stop);
float ms;
hipEventElapsedTime(&ms,start,stop);
printf("%f\n",ms);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addPiS_S_i
.globl _Z3addPiS_S_i
.p2align 8
.type _Z3addPiS_S_i,@function
_Z3addPiS_S_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addPiS_S_i, .Lfunc_end0-_Z3addPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3addPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.