system stringclasses 7 values | input stringlengths 10 94.5k | output stringlengths 14 55.5k |
|---|---|---|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | // Number of threads per block.
#define NT 1024
// Structure for a 3-D point.
typedef struct {
double x;
double y;
double z;
}point_t;
// Structure for a solution.
typedef struct {
int a;
int b;
double d;
}solution_t;
// Variables in global memory.
__device__ int devBestSol;
// Per-thread variables in shared memory.
__shared__ solution_t shrSols[NT];
__shared__ int shrBestSolIndex[NT];
/**
* Calculates the city-block distance between two point_t structs as defiend
* by this function. distance(P1,P2) = |x1 β x2| + |y1 β y2| + |z1 β z2|
* @param p1 A pointer to the first point.
* @param p2 A pointer to the second point.
* @return The city block distance between p1 and p2.
*/
__device__ double distance(point_t *p1, point_t *p2) {
double tempX = p1->x - p2->x;
if (tempX < 0) {
tempX *= -1;
}
double tempY = p1->y - p2->y;
if (tempY < 0) {
tempY *= -1;
}
double tempZ = p1->z - p2->z;
if (tempZ < 0) {
tempZ *= -1;
}
return tempX + tempY + tempZ;
}
/**
* Compares to different solution_t to find the one with the lowest distance then a index and finall b index.
* @param a Pointer to first solution.
* @param b Pointer to second solution.
* @return true if a is the better solution false otherwise.
*/
__device__ bool compareSol(solution_t *a, solution_t *b){
bool aIsbest = false;
if(a->d == -1.0){aIsbest = false;}
else if(b->d == -1.0){aIsbest = true;}
else if (b->d > a->d) {
aIsbest = true;
} else if (b->d == a->d) {
if (b->a > a->a){
aIsbest = true;
}else{
if (b->b > a->b){
aIsbest = true;
}
}
}
return aIsbest;
}
/**
* Device kernel to calculate the distance for each point to its closest medoid.
*
* Called with a one-dimensional grid of one-dimensional blocks, N blocks, NT
* threads per block. N = number of points. Each block finds the best solution
* for its given A index. Each thread within a block computes its total
* distance for its B index(s).
*
* @param pointList Array of all the points.
* @param solutions Array of all the solutions the gpu finds.
* @param N Total number of points.
*/
extern "C" __global__ void computeMedoids
(point_t *pointList, solution_t *solutions, int N) {
int a = blockIdx.x, b; // X index of this block, the A medoids index
int thrd = threadIdx.x; // Index of this thread within block
point_t medA = pointList[a]; // Medoid A's point
// double *solu = &solutions[a + b * N];
double d = 0.0;
solution_t sol;
sol.a = a;
sol.b = thrd;
sol.d = -1.0;
for (b = thrd; b < N; b += NT) {
d = 0;
solution_t tempSol;
for (int p = 0; p < N; p++){
point_t medB = pointList[b]; // Medoid B's point
if (p == a || p == b) continue;
double distA = distance( &medA, &pointList[p]);
double distB = distance( &medB, &pointList[p]);
d += (distA <= distB) ? distA : distB;
tempSol.a = a;
tempSol.b = b;
tempSol.d = (b <= a | b >= N) ? -1 : d;
}
if(!compareSol(&sol, &tempSol))
sol = tempSol;
}
shrSols[thrd] = sol;
shrBestSolIndex[thrd] = thrd;
__syncthreads();
// Reduction to find the best solution in the current block
for (int s = NT / 2; s > 0; s >>= 1) {
if (thrd < s){
shrBestSolIndex[thrd] = compareSol(&shrSols[shrBestSolIndex[thrd]], &shrSols[shrBestSolIndex[thrd + s]]) ? shrBestSolIndex[thrd] : shrBestSolIndex[thrd + s];
}
__syncthreads();
}
solutions[a] = shrSols[shrBestSolIndex[0]];
__threadfence();
// Final reduction to find the best solution overall.
if (thrd == 0) {
int oldSol;
int newSol;
do {
oldSol = devBestSol;
newSol = compareSol(&solutions[devBestSol], &solutions[a]) ? devBestSol : a;
} while (atomicCAS (&devBestSol, oldSol, newSol) != oldSol);
}
} | .file "tmpxft_0014714d_00000000-6_KMedoids.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2031:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2031:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z8distanceP7point_tS0_
.type _Z8distanceP7point_tS0_, @function
_Z8distanceP7point_tS0_:
.LFB2027:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2027:
.size _Z8distanceP7point_tS0_, .-_Z8distanceP7point_tS0_
.globl _Z10compareSolP10solution_tS0_
.type _Z10compareSolP10solution_tS0_, @function
_Z10compareSolP10solution_tS0_:
.LFB2028:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2028:
.size _Z10compareSolP10solution_tS0_, .-_Z10compareSolP10solution_tS0_
.globl _Z55__device_stub__Z14computeMedoidsP7point_tP10solution_tiP7point_tP10solution_ti
.type _Z55__device_stub__Z14computeMedoidsP7point_tP10solution_tiP7point_tP10solution_ti, @function
_Z55__device_stub__Z14computeMedoidsP7point_tP10solution_tiP7point_tP10solution_ti:
.LFB2053:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq computeMedoids(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2053:
.size _Z55__device_stub__Z14computeMedoidsP7point_tP10solution_tiP7point_tP10solution_ti, .-_Z55__device_stub__Z14computeMedoidsP7point_tP10solution_tiP7point_tP10solution_ti
.globl computeMedoids
.type computeMedoids, @function
computeMedoids:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z55__device_stub__Z14computeMedoidsP7point_tP10solution_tiP7point_tP10solution_ti
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size computeMedoids, .-computeMedoids
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "computeMedoids"
.LC1:
.string "devBestSol"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2056:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq computeMedoids(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $4, %r9d
movl $0, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10devBestSol(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2056:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL10devBestSol
.comm _ZL10devBestSol,4,4
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | // Number of threads per block.
#define NT 1024
// Structure for a 3-D point.
typedef struct {
double x;
double y;
double z;
}point_t;
// Structure for a solution.
typedef struct {
int a;
int b;
double d;
}solution_t;
// Variables in global memory.
__device__ int devBestSol;
// Per-thread variables in shared memory.
__shared__ solution_t shrSols[NT];
__shared__ int shrBestSolIndex[NT];
/**
* Calculates the city-block distance between two point_t structs as defiend
* by this function. distance(P1,P2) = |x1 β x2| + |y1 β y2| + |z1 β z2|
* @param p1 A pointer to the first point.
* @param p2 A pointer to the second point.
* @return The city block distance between p1 and p2.
*/
__device__ double distance(point_t *p1, point_t *p2) {
double tempX = p1->x - p2->x;
if (tempX < 0) {
tempX *= -1;
}
double tempY = p1->y - p2->y;
if (tempY < 0) {
tempY *= -1;
}
double tempZ = p1->z - p2->z;
if (tempZ < 0) {
tempZ *= -1;
}
return tempX + tempY + tempZ;
}
/**
* Compares to different solution_t to find the one with the lowest distance then a index and finall b index.
* @param a Pointer to first solution.
* @param b Pointer to second solution.
* @return true if a is the better solution false otherwise.
*/
__device__ bool compareSol(solution_t *a, solution_t *b){
bool aIsbest = false;
if(a->d == -1.0){aIsbest = false;}
else if(b->d == -1.0){aIsbest = true;}
else if (b->d > a->d) {
aIsbest = true;
} else if (b->d == a->d) {
if (b->a > a->a){
aIsbest = true;
}else{
if (b->b > a->b){
aIsbest = true;
}
}
}
return aIsbest;
}
/**
* Device kernel to calculate the distance for each point to its closest medoid.
*
* Called with a one-dimensional grid of one-dimensional blocks, N blocks, NT
* threads per block. N = number of points. Each block finds the best solution
* for its given A index. Each thread within a block computes its total
* distance for its B index(s).
*
* @param pointList Array of all the points.
* @param solutions Array of all the solutions the gpu finds.
* @param N Total number of points.
*/
extern "C" __global__ void computeMedoids
(point_t *pointList, solution_t *solutions, int N) {
int a = blockIdx.x, b; // X index of this block, the A medoids index
int thrd = threadIdx.x; // Index of this thread within block
point_t medA = pointList[a]; // Medoid A's point
// double *solu = &solutions[a + b * N];
double d = 0.0;
solution_t sol;
sol.a = a;
sol.b = thrd;
sol.d = -1.0;
for (b = thrd; b < N; b += NT) {
d = 0;
solution_t tempSol;
for (int p = 0; p < N; p++){
point_t medB = pointList[b]; // Medoid B's point
if (p == a || p == b) continue;
double distA = distance( &medA, &pointList[p]);
double distB = distance( &medB, &pointList[p]);
d += (distA <= distB) ? distA : distB;
tempSol.a = a;
tempSol.b = b;
tempSol.d = (b <= a | b >= N) ? -1 : d;
}
if(!compareSol(&sol, &tempSol))
sol = tempSol;
}
shrSols[thrd] = sol;
shrBestSolIndex[thrd] = thrd;
__syncthreads();
// Reduction to find the best solution in the current block
for (int s = NT / 2; s > 0; s >>= 1) {
if (thrd < s){
shrBestSolIndex[thrd] = compareSol(&shrSols[shrBestSolIndex[thrd]], &shrSols[shrBestSolIndex[thrd + s]]) ? shrBestSolIndex[thrd] : shrBestSolIndex[thrd + s];
}
__syncthreads();
}
solutions[a] = shrSols[shrBestSolIndex[0]];
__threadfence();
// Final reduction to find the best solution overall.
if (thrd == 0) {
int oldSol;
int newSol;
do {
oldSol = devBestSol;
newSol = compareSol(&solutions[devBestSol], &solutions[a]) ? devBestSol : a;
} while (atomicCAS (&devBestSol, oldSol, newSol) != oldSol);
}
} | #include <hip/hip_runtime.h>
// Number of threads per block.
#define NT 1024
// Structure for a 3-D point.
typedef struct {
double x;
double y;
double z;
}point_t;
// Structure for a solution.
typedef struct {
int a;
int b;
double d;
}solution_t;
// Variables in global memory.
__device__ int devBestSol;
// Per-thread variables in shared memory.
__shared__ solution_t shrSols[NT];
__shared__ int shrBestSolIndex[NT];
/**
* Calculates the city-block distance between two point_t structs as defiend
* by this function. distance(P1,P2) = |x1 β x2| + |y1 β y2| + |z1 β z2|
* @param p1 A pointer to the first point.
* @param p2 A pointer to the second point.
* @return The city block distance between p1 and p2.
*/
__device__ double distance(point_t *p1, point_t *p2) {
double tempX = p1->x - p2->x;
if (tempX < 0) {
tempX *= -1;
}
double tempY = p1->y - p2->y;
if (tempY < 0) {
tempY *= -1;
}
double tempZ = p1->z - p2->z;
if (tempZ < 0) {
tempZ *= -1;
}
return tempX + tempY + tempZ;
}
/**
* Compares to different solution_t to find the one with the lowest distance then a index and finall b index.
* @param a Pointer to first solution.
* @param b Pointer to second solution.
* @return true if a is the better solution false otherwise.
*/
__device__ bool compareSol(solution_t *a, solution_t *b){
bool aIsbest = false;
if(a->d == -1.0){aIsbest = false;}
else if(b->d == -1.0){aIsbest = true;}
else if (b->d > a->d) {
aIsbest = true;
} else if (b->d == a->d) {
if (b->a > a->a){
aIsbest = true;
}else{
if (b->b > a->b){
aIsbest = true;
}
}
}
return aIsbest;
}
/**
* Device kernel to calculate the distance for each point to its closest medoid.
*
* Called with a one-dimensional grid of one-dimensional blocks, N blocks, NT
* threads per block. N = number of points. Each block finds the best solution
* for its given A index. Each thread within a block computes its total
* distance for its B index(s).
*
* @param pointList Array of all the points.
* @param solutions Array of all the solutions the gpu finds.
* @param N Total number of points.
*/
extern "C" __global__ void computeMedoids
(point_t *pointList, solution_t *solutions, int N) {
int a = blockIdx.x, b; // X index of this block, the A medoids index
int thrd = threadIdx.x; // Index of this thread within block
point_t medA = pointList[a]; // Medoid A's point
// double *solu = &solutions[a + b * N];
double d = 0.0;
solution_t sol;
sol.a = a;
sol.b = thrd;
sol.d = -1.0;
for (b = thrd; b < N; b += NT) {
d = 0;
solution_t tempSol;
for (int p = 0; p < N; p++){
point_t medB = pointList[b]; // Medoid B's point
if (p == a || p == b) continue;
double distA = distance( &medA, &pointList[p]);
double distB = distance( &medB, &pointList[p]);
d += (distA <= distB) ? distA : distB;
tempSol.a = a;
tempSol.b = b;
tempSol.d = (b <= a | b >= N) ? -1 : d;
}
if(!compareSol(&sol, &tempSol))
sol = tempSol;
}
shrSols[thrd] = sol;
shrBestSolIndex[thrd] = thrd;
__syncthreads();
// Reduction to find the best solution in the current block
for (int s = NT / 2; s > 0; s >>= 1) {
if (thrd < s){
shrBestSolIndex[thrd] = compareSol(&shrSols[shrBestSolIndex[thrd]], &shrSols[shrBestSolIndex[thrd + s]]) ? shrBestSolIndex[thrd] : shrBestSolIndex[thrd + s];
}
__syncthreads();
}
solutions[a] = shrSols[shrBestSolIndex[0]];
__threadfence();
// Final reduction to find the best solution overall.
if (thrd == 0) {
int oldSol;
int newSol;
do {
oldSol = devBestSol;
newSol = compareSol(&solutions[devBestSol], &solutions[a]) ? devBestSol : a;
} while (atomicCAS (&devBestSol, oldSol, newSol) != oldSol);
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
// Number of threads per block.
#define NT 1024
// Structure for a 3-D point.
typedef struct {
double x;
double y;
double z;
}point_t;
// Structure for a solution.
typedef struct {
int a;
int b;
double d;
}solution_t;
// Variables in global memory.
__device__ int devBestSol;
// Per-thread variables in shared memory.
__shared__ solution_t shrSols[NT];
__shared__ int shrBestSolIndex[NT];
/**
* Calculates the city-block distance between two point_t structs as defiend
* by this function. distance(P1,P2) = |x1 β x2| + |y1 β y2| + |z1 β z2|
* @param p1 A pointer to the first point.
* @param p2 A pointer to the second point.
* @return The city block distance between p1 and p2.
*/
__device__ double distance(point_t *p1, point_t *p2) {
double tempX = p1->x - p2->x;
if (tempX < 0) {
tempX *= -1;
}
double tempY = p1->y - p2->y;
if (tempY < 0) {
tempY *= -1;
}
double tempZ = p1->z - p2->z;
if (tempZ < 0) {
tempZ *= -1;
}
return tempX + tempY + tempZ;
}
/**
* Compares to different solution_t to find the one with the lowest distance then a index and finall b index.
* @param a Pointer to first solution.
* @param b Pointer to second solution.
* @return true if a is the better solution false otherwise.
*/
__device__ bool compareSol(solution_t *a, solution_t *b){
bool aIsbest = false;
if(a->d == -1.0){aIsbest = false;}
else if(b->d == -1.0){aIsbest = true;}
else if (b->d > a->d) {
aIsbest = true;
} else if (b->d == a->d) {
if (b->a > a->a){
aIsbest = true;
}else{
if (b->b > a->b){
aIsbest = true;
}
}
}
return aIsbest;
}
/**
* Device kernel to calculate the distance for each point to its closest medoid.
*
* Called with a one-dimensional grid of one-dimensional blocks, N blocks, NT
* threads per block. N = number of points. Each block finds the best solution
* for its given A index. Each thread within a block computes its total
* distance for its B index(s).
*
* @param pointList Array of all the points.
* @param solutions Array of all the solutions the gpu finds.
* @param N Total number of points.
*/
extern "C" __global__ void computeMedoids
(point_t *pointList, solution_t *solutions, int N) {
int a = blockIdx.x, b; // X index of this block, the A medoids index
int thrd = threadIdx.x; // Index of this thread within block
point_t medA = pointList[a]; // Medoid A's point
// double *solu = &solutions[a + b * N];
double d = 0.0;
solution_t sol;
sol.a = a;
sol.b = thrd;
sol.d = -1.0;
for (b = thrd; b < N; b += NT) {
d = 0;
solution_t tempSol;
for (int p = 0; p < N; p++){
point_t medB = pointList[b]; // Medoid B's point
if (p == a || p == b) continue;
double distA = distance( &medA, &pointList[p]);
double distB = distance( &medB, &pointList[p]);
d += (distA <= distB) ? distA : distB;
tempSol.a = a;
tempSol.b = b;
tempSol.d = (b <= a | b >= N) ? -1 : d;
}
if(!compareSol(&sol, &tempSol))
sol = tempSol;
}
shrSols[thrd] = sol;
shrBestSolIndex[thrd] = thrd;
__syncthreads();
// Reduction to find the best solution in the current block
for (int s = NT / 2; s > 0; s >>= 1) {
if (thrd < s){
shrBestSolIndex[thrd] = compareSol(&shrSols[shrBestSolIndex[thrd]], &shrSols[shrBestSolIndex[thrd + s]]) ? shrBestSolIndex[thrd] : shrBestSolIndex[thrd + s];
}
__syncthreads();
}
solutions[a] = shrSols[shrBestSolIndex[0]];
__threadfence();
// Final reduction to find the best solution overall.
if (thrd == 0) {
int oldSol;
int newSol;
do {
oldSol = devBestSol;
newSol = compareSol(&solutions[devBestSol], &solutions[a]) ? devBestSol : a;
} while (atomicCAS (&devBestSol, oldSol, newSol) != oldSol);
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected computeMedoids
.globl computeMedoids
.p2align 8
.type computeMedoids,@function
computeMedoids:
s_load_b32 s20, s[0:1], 0x10
v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v10, v0
v_dual_mov_b32 v2, 0xbff00000 :: v_dual_mov_b32 v9, s15
s_mov_b32 s6, s15
s_ashr_i32 s7, s15, 31
s_mov_b32 s21, exec_lo
s_waitcnt lgkmcnt(0)
v_cmpx_gt_i32_e64 s20, v0
s_cbranch_execz .LBB0_17
s_load_b64 s[12:13], s[0:1], 0x0
s_mul_i32 s2, s6, 24
s_mul_hi_i32 s3, s6, 24
v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v10, v0
v_dual_mov_b32 v2, 0xbff00000 :: v_dual_mov_b32 v9, s6
v_mov_b32_e32 v11, v0
s_mov_b32 s23, 0
s_waitcnt lgkmcnt(0)
s_add_u32 s2, s12, s2
s_addc_u32 s3, s13, s3
s_cmp_gt_i32 s20, 0
s_clause 0x1
s_load_b128 s[8:11], s[2:3], 0x0
s_load_b64 s[14:15], s[2:3], 0x10
s_cselect_b32 s22, -1, 0
s_add_u32 s16, s12, 8
s_addc_u32 s17, s13, 0
s_branch .LBB0_3
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s3
v_add_nc_u32_e32 v11, 0x400, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_le_i32_e32 vcc_lo, s20, v11
s_or_b32 s23, vcc_lo, s23
s_and_not1_b32 exec_lo, exec_lo, s23
s_cbranch_execz .LBB0_16
.LBB0_3:
s_and_not1_b32 vcc_lo, exec_lo, s22
s_cbranch_vccnz .LBB0_8
v_mad_u64_u32 v[5:6], null, v11, 24, s[12:13]
v_mov_b32_e32 v7, 0
v_mov_b32_e32 v8, 0
v_cmp_lt_i32_e32 vcc_lo, s6, v11
s_mov_b32 s24, 0
s_mov_b64 s[18:19], s[16:17]
s_branch .LBB0_6
.LBB0_5:
s_or_b32 exec_lo, exec_lo, s25
s_add_i32 s24, s24, 1
s_add_u32 s18, s18, 24
s_addc_u32 s19, s19, 0
s_cmp_eq_u32 s20, s24
s_cbranch_scc1 .LBB0_8
.LBB0_6:
v_cmp_ne_u32_e64 s2, s24, v11
s_cmp_lg_u32 s6, s24
s_cselect_b32 s3, -1, 0
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s3, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s25, s2
s_cbranch_execz .LBB0_5
s_clause 0x1
global_load_b128 v[12:15], v[5:6], off
global_load_b64 v[3:4], v[5:6], off offset:16
s_add_u32 s2, s18, -8
s_addc_u32 s3, s19, -1
s_clause 0x1
s_load_b64 s[2:3], s[2:3], 0x0
s_load_b128 s[28:31], s[18:19], 0x0
s_waitcnt lgkmcnt(0)
v_add_f64 v[16:17], s[8:9], -s[2:3]
v_add_f64 v[18:19], s[10:11], -s[28:29]
v_add_f64 v[20:21], s[14:15], -s[30:31]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_xor_b32_e32 v22, 0x80000000, v17
v_xor_b32_e32 v23, 0x80000000, v19
s_delay_alu instid0(VALU_DEP_3)
v_xor_b32_e32 v26, 0x80000000, v21
s_waitcnt vmcnt(1)
v_add_f64 v[12:13], v[12:13], -s[2:3]
v_add_f64 v[14:15], v[14:15], -s[28:29]
v_cmp_gt_f64_e64 s2, 0, v[16:17]
v_cmp_gt_f64_e64 s3, 0, v[18:19]
s_waitcnt vmcnt(0)
v_add_f64 v[3:4], v[3:4], -s[30:31]
v_cmp_gt_f64_e64 s4, 0, v[12:13]
v_cmp_gt_f64_e64 s5, 0, v[14:15]
v_xor_b32_e32 v24, 0x80000000, v13
v_xor_b32_e32 v25, 0x80000000, v15
v_cndmask_b32_e64 v17, v17, v22, s2
v_cndmask_b32_e64 v19, v19, v23, s3
v_cndmask_b32_e64 v16, v16, v16, s2
v_cndmask_b32_e64 v18, v18, v18, s3
v_cmp_gt_f64_e64 s2, 0, v[3:4]
v_cmp_gt_f64_e64 s3, 0, v[20:21]
v_xor_b32_e32 v22, 0x80000000, v4
s_delay_alu instid0(VALU_DEP_4)
v_add_f64 v[16:17], v[16:17], v[18:19]
v_cndmask_b32_e64 v13, v13, v24, s4
v_cndmask_b32_e64 v15, v15, v25, s5
v_cndmask_b32_e64 v12, v12, v12, s4
v_cndmask_b32_e64 v14, v14, v14, s5
v_cndmask_b32_e64 v4, v4, v22, s2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_2)
v_add_f64 v[12:13], v[12:13], v[14:15]
v_cndmask_b32_e64 v15, v21, v26, s3
v_cndmask_b32_e64 v14, v20, v20, s3
v_cndmask_b32_e64 v3, v3, v3, s2
v_add_f64 v[14:15], v[16:17], v[14:15]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_f64 v[3:4], v[12:13], v[3:4]
v_mov_b32_e32 v13, v11
v_cmp_le_f64_e64 s2, v[14:15], v[3:4]
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cndmask_b32_e64 v4, v4, v15, s2
v_cndmask_b32_e64 v3, v3, v14, s2
v_add_f64 v[7:8], v[7:8], v[3:4]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_dual_mov_b32 v12, s6 :: v_dual_cndmask_b32 v3, 0, v7
v_cndmask_b32_e32 v4, 0xbff00000, v8, vcc_lo
s_branch .LBB0_5
.LBB0_8:
s_mov_b32 s2, -1
s_mov_b32 s3, exec_lo
v_cmpx_neq_f64_e32 -1.0, v[1:2]
s_cbranch_execz .LBB0_14
v_cmp_neq_f64_e32 vcc_lo, -1.0, v[3:4]
v_cmp_ngt_f64_e64 s2, v[3:4], v[1:2]
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 s5, vcc_lo, s2
s_mov_b32 s2, 0
s_and_saveexec_b32 s4, s5
s_cbranch_execz .LBB0_13
s_mov_b32 s2, -1
s_mov_b32 s5, exec_lo
v_cmpx_eq_f64_e32 v[3:4], v[1:2]
v_cmp_le_i32_e32 vcc_lo, v12, v9
v_cmp_le_i32_e64 s2, v13, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, vcc_lo, s2
s_or_not1_b32 s2, s2, exec_lo
s_or_b32 exec_lo, exec_lo, s5
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 s2, s2, exec_lo
.LBB0_13:
s_or_b32 exec_lo, exec_lo, s4
s_delay_alu instid0(SALU_CYCLE_1)
s_or_not1_b32 s2, s2, exec_lo
.LBB0_14:
s_or_b32 exec_lo, exec_lo, s3
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
v_dual_mov_b32 v9, v12 :: v_dual_mov_b32 v10, v13
v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
s_branch .LBB0_2
.LBB0_16:
s_or_b32 exec_lo, exec_lo, s23
.LBB0_17:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s21
v_lshlrev_b32_e32 v3, 2, v0
v_lshlrev_b32_e32 v4, 4, v0
s_movk_i32 s3, 0x200
ds_store_2addr_b32 v4, v9, v10 offset1:1
ds_store_b64 v4, v[1:2] offset:8
ds_store_b32 v3, v0 offset:16384
v_or_b32_e32 v5, 0x4000, v3
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_branch .LBB0_23
.LBB0_18:
s_or_b32 exec_lo, exec_lo, s9
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 s9, s10, exec_lo
.LBB0_19:
s_or_b32 exec_lo, exec_lo, s8
s_delay_alu instid0(SALU_CYCLE_1)
s_or_not1_b32 s8, s9, exec_lo
.LBB0_20:
s_or_b32 exec_lo, exec_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 s2, s8, exec_lo
.LBB0_21:
s_or_b32 exec_lo, exec_lo, s5
v_cndmask_b32_e64 v1, v6, v5, s2
ds_load_b32 v1, v1
s_waitcnt lgkmcnt(0)
ds_store_b32 v5, v1
.LBB0_22:
s_or_b32 exec_lo, exec_lo, s4
s_lshr_b32 s2, s3, 1
s_cmp_lt_u32 s3, 2
s_mov_b32 s3, s2
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB0_29
.LBB0_23:
s_mov_b32 s4, exec_lo
v_cmpx_gt_u32_e64 s3, v0
s_cbranch_execz .LBB0_22
ds_load_b32 v1, v5
v_add_nc_u32_e32 v3, s3, v0
s_mov_b32 s2, 0
s_mov_b32 s5, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_lshl_add_u32 v6, v3, 2, 0x4000
s_waitcnt lgkmcnt(0)
v_lshlrev_b32_e32 v7, 4, v1
ds_load_b64 v[1:2], v7 offset:8
s_waitcnt lgkmcnt(0)
v_cmpx_neq_f64_e32 -1.0, v[1:2]
s_cbranch_execz .LBB0_21
ds_load_b32 v3, v6
s_mov_b32 s8, -1
s_waitcnt lgkmcnt(0)
v_lshlrev_b32_e32 v8, 4, v3
ds_load_b64 v[3:4], v8 offset:8
s_waitcnt lgkmcnt(0)
v_cmp_neq_f64_e32 vcc_lo, -1.0, v[3:4]
v_cmp_ngt_f64_e64 s2, v[3:4], v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s9, vcc_lo, s2
s_and_saveexec_b32 s2, s9
s_cbranch_execz .LBB0_20
s_mov_b32 s9, 0
s_mov_b32 s8, exec_lo
v_cmpx_eq_f64_e32 v[3:4], v[1:2]
s_cbranch_execz .LBB0_19
ds_load_b32 v1, v8
ds_load_b32 v2, v7
s_mov_b32 s10, -1
s_mov_b32 s9, exec_lo
s_waitcnt lgkmcnt(0)
v_cmpx_le_i32_e64 v1, v2
s_cbranch_execz .LBB0_18
ds_load_b32 v1, v8 offset:4
ds_load_b32 v2, v7 offset:4
s_waitcnt lgkmcnt(0)
v_cmp_gt_i32_e32 vcc_lo, v1, v2
s_or_not1_b32 s10, vcc_lo, exec_lo
s_branch .LBB0_18
.LBB0_29:
v_mov_b32_e32 v7, 0
s_load_b64 s[2:3], s[0:1], 0x8
s_lshl_b64 s[0:1], s[6:7], 4
s_mov_b32 s7, 0
s_mov_b32 s8, exec_lo
ds_load_b32 v1, v7 offset:16384
s_waitcnt lgkmcnt(0)
s_add_u32 s4, s2, s0
s_addc_u32 s5, s3, s1
v_lshlrev_b32_e32 v1, 4, v1
ds_load_2addr_b64 v[1:4], v1 offset1:1
s_waitcnt lgkmcnt(0)
global_store_b128 v7, v[1:4], s[4:5]
s_waitcnt_vscnt null, 0x0
buffer_gl1_inv
buffer_gl0_inv
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB0_38
s_add_u32 s0, s2, s0
s_addc_u32 s1, s3, s1
s_add_u32 s8, s0, 8
s_addc_u32 s9, s1, 0
s_add_u32 s10, s0, 4
s_addc_u32 s11, s1, 0
s_getpc_b64 s[12:13]
s_add_u32 s12, s12, devBestSol@rel32@lo+4
s_addc_u32 s13, s13, devBestSol@rel32@hi+12
s_branch .LBB0_33
.LBB0_31:
s_mov_b32 s0, 0
.LBB0_32:
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_cndmask_b32_e64 v1, s6, v0, s0
v_mov_b32_e32 v2, v0
global_atomic_cmpswap_b32 v1, v7, v[1:2], s[12:13] glc
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, v1, v0
s_or_b32 s7, vcc_lo, s7
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s7
s_cbranch_execz .LBB0_38
.LBB0_33:
global_load_b32 v0, v7, s[12:13]
s_waitcnt vmcnt(0)
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 4, v[0:1]
v_add_co_u32 v3, vcc_lo, s2, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v2, vcc_lo
global_load_b64 v[3:4], v[3:4], off offset:8
s_waitcnt vmcnt(0)
v_cmp_eq_f64_e32 vcc_lo, -1.0, v[3:4]
s_cbranch_vccnz .LBB0_31
global_load_b64 v[5:6], v7, s[8:9]
s_waitcnt vmcnt(0)
v_cmp_eq_f64_e32 vcc_lo, -1.0, v[5:6]
v_cmp_gt_f64_e64 s0, v[5:6], v[3:4]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_or_b32 s0, vcc_lo, s0
s_and_b32 vcc_lo, exec_lo, s0
s_mov_b32 s0, -1
s_cbranch_vccnz .LBB0_32
v_cmp_neq_f64_e32 vcc_lo, v[5:6], v[3:4]
s_cbranch_vccnz .LBB0_31
v_add_co_u32 v3, vcc_lo, s2, v1
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v2, vcc_lo
s_clause 0x1
global_load_b32 v5, v7, s[4:5]
global_load_b32 v3, v[3:4], off
s_waitcnt vmcnt(0)
v_cmp_gt_i32_e32 vcc_lo, v5, v3
s_cbranch_vccnz .LBB0_32
v_add_co_u32 v1, vcc_lo, s2, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s3, v2, vcc_lo
s_clause 0x1
global_load_b32 v3, v7, s[10:11]
global_load_b32 v1, v[1:2], off offset:4
s_waitcnt vmcnt(0)
v_cmp_gt_i32_e64 s0, v3, v1
s_branch .LBB0_32
.LBB0_38:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel computeMedoids
.amdhsa_group_segment_fixed_size 20480
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 20
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 27
.amdhsa_next_free_sgpr 32
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size computeMedoids, .Lfunc_end0-computeMedoids
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.protected devBestSol
.type devBestSol,@object
.section .bss,"aw",@nobits
.globl devBestSol
.p2align 2, 0x0
devBestSol:
.long 0
.size devBestSol, 4
.type __hip_cuid_,@object
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym devBestSol
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 20480
.kernarg_segment_align: 8
.kernarg_segment_size: 20
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: computeMedoids
.private_segment_fixed_size: 0
.sgpr_count: 34
.sgpr_spill_count: 0
.symbol: computeMedoids.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 27
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
// Number of threads per block.
#define NT 1024
// Structure for a 3-D point.
typedef struct {
double x;
double y;
double z;
}point_t;
// Structure for a solution.
typedef struct {
int a;
int b;
double d;
}solution_t;
// Variables in global memory.
__device__ int devBestSol;
// Per-thread variables in shared memory.
__shared__ solution_t shrSols[NT];
__shared__ int shrBestSolIndex[NT];
/**
* Calculates the city-block distance between two point_t structs as defiend
* by this function. distance(P1,P2) = |x1 β x2| + |y1 β y2| + |z1 β z2|
* @param p1 A pointer to the first point.
* @param p2 A pointer to the second point.
* @return The city block distance between p1 and p2.
*/
__device__ double distance(point_t *p1, point_t *p2) {
double tempX = p1->x - p2->x;
if (tempX < 0) {
tempX *= -1;
}
double tempY = p1->y - p2->y;
if (tempY < 0) {
tempY *= -1;
}
double tempZ = p1->z - p2->z;
if (tempZ < 0) {
tempZ *= -1;
}
return tempX + tempY + tempZ;
}
/**
* Compares to different solution_t to find the one with the lowest distance then a index and finall b index.
* @param a Pointer to first solution.
* @param b Pointer to second solution.
* @return true if a is the better solution false otherwise.
*/
__device__ bool compareSol(solution_t *a, solution_t *b){
bool aIsbest = false;
if(a->d == -1.0){aIsbest = false;}
else if(b->d == -1.0){aIsbest = true;}
else if (b->d > a->d) {
aIsbest = true;
} else if (b->d == a->d) {
if (b->a > a->a){
aIsbest = true;
}else{
if (b->b > a->b){
aIsbest = true;
}
}
}
return aIsbest;
}
/**
* Device kernel to calculate the distance for each point to its closest medoid.
*
* Called with a one-dimensional grid of one-dimensional blocks, N blocks, NT
* threads per block. N = number of points. Each block finds the best solution
* for its given A index. Each thread within a block computes its total
* distance for its B index(s).
*
* @param pointList Array of all the points.
* @param solutions Array of all the solutions the gpu finds.
* @param N Total number of points.
*/
extern "C" __global__ void computeMedoids
(point_t *pointList, solution_t *solutions, int N) {
int a = blockIdx.x, b; // X index of this block, the A medoids index
int thrd = threadIdx.x; // Index of this thread within block
point_t medA = pointList[a]; // Medoid A's point
// double *solu = &solutions[a + b * N];
double d = 0.0;
solution_t sol;
sol.a = a;
sol.b = thrd;
sol.d = -1.0;
for (b = thrd; b < N; b += NT) {
d = 0;
solution_t tempSol;
for (int p = 0; p < N; p++){
point_t medB = pointList[b]; // Medoid B's point
if (p == a || p == b) continue;
double distA = distance( &medA, &pointList[p]);
double distB = distance( &medB, &pointList[p]);
d += (distA <= distB) ? distA : distB;
tempSol.a = a;
tempSol.b = b;
tempSol.d = (b <= a | b >= N) ? -1 : d;
}
if(!compareSol(&sol, &tempSol))
sol = tempSol;
}
shrSols[thrd] = sol;
shrBestSolIndex[thrd] = thrd;
__syncthreads();
// Reduction to find the best solution in the current block
for (int s = NT / 2; s > 0; s >>= 1) {
if (thrd < s){
shrBestSolIndex[thrd] = compareSol(&shrSols[shrBestSolIndex[thrd]], &shrSols[shrBestSolIndex[thrd + s]]) ? shrBestSolIndex[thrd] : shrBestSolIndex[thrd + s];
}
__syncthreads();
}
solutions[a] = shrSols[shrBestSolIndex[0]];
__threadfence();
// Final reduction to find the best solution overall.
if (thrd == 0) {
int oldSol;
int newSol;
do {
oldSol = devBestSol;
newSol = compareSol(&solutions[devBestSol], &solutions[a]) ? devBestSol : a;
} while (atomicCAS (&devBestSol, oldSol, newSol) != oldSol);
}
} | .text
.file "KMedoids.hip"
.globl __device_stub__computeMedoids # -- Begin function __device_stub__computeMedoids
.p2align 4, 0x90
.type __device_stub__computeMedoids,@function
__device_stub__computeMedoids: # @__device_stub__computeMedoids
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $computeMedoids, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size __device_stub__computeMedoids, .Lfunc_end0-__device_stub__computeMedoids
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $computeMedoids, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $0, 8(%rsp)
movl $0, (%rsp)
movl $devBestSol, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movl $4, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
callq __hipRegisterVar
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type devBestSol,@object # @devBestSol
.local devBestSol
.comm devBestSol,4,4
.type computeMedoids,@object # @computeMedoids
.section .rodata,"a",@progbits
.globl computeMedoids
.p2align 3, 0x0
computeMedoids:
.quad __device_stub__computeMedoids
.size computeMedoids, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "computeMedoids"
.size .L__unnamed_1, 15
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "devBestSol"
.size .L__unnamed_2, 11
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__computeMedoids
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym devBestSol
.addrsig_sym computeMedoids
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0014714d_00000000-6_KMedoids.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2031:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2031:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z8distanceP7point_tS0_
.type _Z8distanceP7point_tS0_, @function
_Z8distanceP7point_tS0_:
.LFB2027:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2027:
.size _Z8distanceP7point_tS0_, .-_Z8distanceP7point_tS0_
.globl _Z10compareSolP10solution_tS0_
.type _Z10compareSolP10solution_tS0_, @function
_Z10compareSolP10solution_tS0_:
.LFB2028:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2028:
.size _Z10compareSolP10solution_tS0_, .-_Z10compareSolP10solution_tS0_
.globl _Z55__device_stub__Z14computeMedoidsP7point_tP10solution_tiP7point_tP10solution_ti
.type _Z55__device_stub__Z14computeMedoidsP7point_tP10solution_tiP7point_tP10solution_ti, @function
_Z55__device_stub__Z14computeMedoidsP7point_tP10solution_tiP7point_tP10solution_ti:
.LFB2053:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq computeMedoids(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2053:
.size _Z55__device_stub__Z14computeMedoidsP7point_tP10solution_tiP7point_tP10solution_ti, .-_Z55__device_stub__Z14computeMedoidsP7point_tP10solution_tiP7point_tP10solution_ti
.globl computeMedoids
.type computeMedoids, @function
computeMedoids:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z55__device_stub__Z14computeMedoidsP7point_tP10solution_tiP7point_tP10solution_ti
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size computeMedoids, .-computeMedoids
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "computeMedoids"
.LC1:
.string "devBestSol"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2056:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq computeMedoids(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $4, %r9d
movl $0, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10devBestSol(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2056:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL10devBestSol
.comm _ZL10devBestSol,4,4
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "KMedoids.hip"
.globl __device_stub__computeMedoids # -- Begin function __device_stub__computeMedoids
.p2align 4, 0x90
.type __device_stub__computeMedoids,@function
__device_stub__computeMedoids: # @__device_stub__computeMedoids
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $computeMedoids, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size __device_stub__computeMedoids, .Lfunc_end0-__device_stub__computeMedoids
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $computeMedoids, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $0, 8(%rsp)
movl $0, (%rsp)
movl $devBestSol, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movl $4, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
callq __hipRegisterVar
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type devBestSol,@object # @devBestSol
.local devBestSol
.comm devBestSol,4,4
.type computeMedoids,@object # @computeMedoids
.section .rodata,"a",@progbits
.globl computeMedoids
.p2align 3, 0x0
computeMedoids:
.quad __device_stub__computeMedoids
.size computeMedoids, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "computeMedoids"
.size .L__unnamed_1, 15
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "devBestSol"
.size .L__unnamed_2, 11
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__computeMedoids
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym devBestSol
.addrsig_sym computeMedoids
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <stdio.h>
#define ThreadsPerBlock 512
__global__
void GammaKernel(const uchar4* const rgbaImage, uchar4* const outputImage, int numRows, int numCols, float gamma)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < numRows * numCols) {
uchar4 px = rgbaImage[i];
unsigned char rcolor = round(pow((px.x / 255.0f), (1.0f / gamma)) * 255.0f);
outputImage[i].x = (rcolor > 255.0f) ? 255.0f : rcolor;
unsigned char gcolor = round(pow((px.y / 255.0f), (1.0f / gamma)) * 255.0f);
outputImage[i].y = (gcolor > 255.0f) ? 255.0f : gcolor;
unsigned char bcolor = round(pow((px.z / 255.0f), (1.0f / gamma)) * 255.0f);
outputImage[i].z = (bcolor > 255.0f) ? 255.0f : bcolor;
}
}
void CorreccionGamma(uchar4* const d_rgbaImage, uchar4* const d_outputImage, size_t numRows, size_t numCols, float gamma)
{
long long int total_px = numRows * numCols;
long int grids_n = ceil(total_px / ThreadsPerBlock);
const dim3 blockSize(ThreadsPerBlock, 1, 1);
const dim3 gridSize(grids_n, 1, 1);
GammaKernel <<<gridSize, blockSize >>> (d_rgbaImage, d_outputImage, numRows, numCols, gamma);
cudaDeviceSynchronize();
} | .file "tmpxft_000e991c_00000000-6_CorrecionGamma.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z44__device_stub__Z11GammaKernelPK6uchar4PS_iifPK6uchar4PS_iif
.type _Z44__device_stub__Z11GammaKernelPK6uchar4PS_iifPK6uchar4PS_iif, @function
_Z44__device_stub__Z11GammaKernelPK6uchar4PS_iifPK6uchar4PS_iif:
.LFB2082:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movss %xmm0, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z11GammaKernelPK6uchar4PS_iif(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z44__device_stub__Z11GammaKernelPK6uchar4PS_iifPK6uchar4PS_iif, .-_Z44__device_stub__Z11GammaKernelPK6uchar4PS_iifPK6uchar4PS_iif
.globl _Z11GammaKernelPK6uchar4PS_iif
.type _Z11GammaKernelPK6uchar4PS_iif, @function
_Z11GammaKernelPK6uchar4PS_iif:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z44__device_stub__Z11GammaKernelPK6uchar4PS_iifPK6uchar4PS_iif
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z11GammaKernelPK6uchar4PS_iif, .-_Z11GammaKernelPK6uchar4PS_iif
.globl _Z15CorreccionGammaP6uchar4S0_mmf
.type _Z15CorreccionGammaP6uchar4S0_mmf, @function
_Z15CorreccionGammaP6uchar4S0_mmf:
.LFB2057:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $56, %rsp
.cfi_def_cfa_offset 96
movq %rdi, %r12
movq %rsi, %r13
movq %rdx, %rbx
movq %rcx, %rbp
movss %xmm0, 12(%rsp)
movl $512, 24(%rsp)
movl $1, 28(%rsp)
imulq %rcx, %rdx
leaq 511(%rdx), %rax
testq %rdx, %rdx
cmovns %rdx, %rax
sarq $9, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
cvttsd2siq %xmm0, %rax
movl %eax, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 24(%rsp), %rdx
movl $1, %ecx
movq 36(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L12:
call cudaDeviceSynchronize@PLT
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
movss 12(%rsp), %xmm0
movl %ebp, %ecx
movl %ebx, %edx
movq %r13, %rsi
movq %r12, %rdi
call _Z44__device_stub__Z11GammaKernelPK6uchar4PS_iifPK6uchar4PS_iif
jmp .L12
.cfi_endproc
.LFE2057:
.size _Z15CorreccionGammaP6uchar4S0_mmf, .-_Z15CorreccionGammaP6uchar4S0_mmf
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z11GammaKernelPK6uchar4PS_iif"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z11GammaKernelPK6uchar4PS_iif(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <stdio.h>
#define ThreadsPerBlock 512
__global__
void GammaKernel(const uchar4* const rgbaImage, uchar4* const outputImage, int numRows, int numCols, float gamma)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < numRows * numCols) {
uchar4 px = rgbaImage[i];
unsigned char rcolor = round(pow((px.x / 255.0f), (1.0f / gamma)) * 255.0f);
outputImage[i].x = (rcolor > 255.0f) ? 255.0f : rcolor;
unsigned char gcolor = round(pow((px.y / 255.0f), (1.0f / gamma)) * 255.0f);
outputImage[i].y = (gcolor > 255.0f) ? 255.0f : gcolor;
unsigned char bcolor = round(pow((px.z / 255.0f), (1.0f / gamma)) * 255.0f);
outputImage[i].z = (bcolor > 255.0f) ? 255.0f : bcolor;
}
}
void CorreccionGamma(uchar4* const d_rgbaImage, uchar4* const d_outputImage, size_t numRows, size_t numCols, float gamma)
{
long long int total_px = numRows * numCols;
long int grids_n = ceil(total_px / ThreadsPerBlock);
const dim3 blockSize(ThreadsPerBlock, 1, 1);
const dim3 gridSize(grids_n, 1, 1);
GammaKernel <<<gridSize, blockSize >>> (d_rgbaImage, d_outputImage, numRows, numCols, gamma);
cudaDeviceSynchronize();
} | #include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#define ThreadsPerBlock 512
__global__
void GammaKernel(const uchar4* const rgbaImage, uchar4* const outputImage, int numRows, int numCols, float gamma)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < numRows * numCols) {
uchar4 px = rgbaImage[i];
unsigned char rcolor = round(pow((px.x / 255.0f), (1.0f / gamma)) * 255.0f);
outputImage[i].x = (rcolor > 255.0f) ? 255.0f : rcolor;
unsigned char gcolor = round(pow((px.y / 255.0f), (1.0f / gamma)) * 255.0f);
outputImage[i].y = (gcolor > 255.0f) ? 255.0f : gcolor;
unsigned char bcolor = round(pow((px.z / 255.0f), (1.0f / gamma)) * 255.0f);
outputImage[i].z = (bcolor > 255.0f) ? 255.0f : bcolor;
}
}
void CorreccionGamma(uchar4* const d_rgbaImage, uchar4* const d_outputImage, size_t numRows, size_t numCols, float gamma)
{
long long int total_px = numRows * numCols;
long int grids_n = ceil(total_px / ThreadsPerBlock);
const dim3 blockSize(ThreadsPerBlock, 1, 1);
const dim3 gridSize(grids_n, 1, 1);
GammaKernel <<<gridSize, blockSize >>> (d_rgbaImage, d_outputImage, numRows, numCols, gamma);
hipDeviceSynchronize();
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#define ThreadsPerBlock 512
__global__
void GammaKernel(const uchar4* const rgbaImage, uchar4* const outputImage, int numRows, int numCols, float gamma)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < numRows * numCols) {
uchar4 px = rgbaImage[i];
unsigned char rcolor = round(pow((px.x / 255.0f), (1.0f / gamma)) * 255.0f);
outputImage[i].x = (rcolor > 255.0f) ? 255.0f : rcolor;
unsigned char gcolor = round(pow((px.y / 255.0f), (1.0f / gamma)) * 255.0f);
outputImage[i].y = (gcolor > 255.0f) ? 255.0f : gcolor;
unsigned char bcolor = round(pow((px.z / 255.0f), (1.0f / gamma)) * 255.0f);
outputImage[i].z = (bcolor > 255.0f) ? 255.0f : bcolor;
}
}
void CorreccionGamma(uchar4* const d_rgbaImage, uchar4* const d_outputImage, size_t numRows, size_t numCols, float gamma)
{
long long int total_px = numRows * numCols;
long int grids_n = ceil(total_px / ThreadsPerBlock);
const dim3 blockSize(ThreadsPerBlock, 1, 1);
const dim3 gridSize(grids_n, 1, 1);
GammaKernel <<<gridSize, blockSize >>> (d_rgbaImage, d_outputImage, numRows, numCols, gamma);
hipDeviceSynchronize();
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z11GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif
.globl _Z11GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif
.p2align 8
.type _Z11GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif,@function
_Z11GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x2c
s_load_b64 s[2:3], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_mul_i32 s2, s3, s2
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_i32_e32 vcc_lo, s2, v1
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_2
s_clause 0x1
s_load_b128 s[12:15], s[0:1], 0x0
s_load_b32 s3, s[0:1], 0x18
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s12, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s13, v1, vcc_lo
v_div_scale_f32 v14, s0, 1.0, s3, 1.0
s_mov_b32 s13, 0x3e76c4e1
s_clause 0x2
global_load_u8 v4, v[2:3], off
global_load_u8 v5, v[2:3], off offset:1
global_load_u8 v2, v[2:3], off offset:2
v_div_scale_f32 v3, null, s3, s3, 1.0
s_delay_alu instid0(VALU_DEP_1)
v_rcp_f32_e32 v6, v3
s_waitcnt_depctr 0xfff
v_fma_f32 v10, -v3, v6, 1.0
s_waitcnt vmcnt(2)
v_cvt_f32_ubyte0_e32 v4, v4
s_waitcnt vmcnt(1)
v_cvt_f32_ubyte0_e32 v5, v5
s_waitcnt vmcnt(0)
v_cvt_f32_ubyte0_e32 v2, v2
v_div_scale_f32 v7, null, 0x437f0000, 0x437f0000, v4
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_div_scale_f32 v8, null, 0x437f0000, 0x437f0000, v5
v_div_scale_f32 v9, null, 0x437f0000, 0x437f0000, v2
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_rcp_f32_e32 v11, v7
v_rcp_f32_e32 v12, v8
v_div_scale_f32 v15, s1, v5, 0x437f0000, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(TRANS32_DEP_3)
v_rcp_f32_e32 v13, v9
v_div_scale_f32 v19, s2, v2, 0x437f0000, v2
v_fma_f32 v16, -v7, v11, 1.0
v_fmac_f32_e32 v6, v10, v6
v_div_scale_f32 v10, vcc_lo, v4, 0x437f0000, v4
s_delay_alu instid0(TRANS32_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
v_fma_f32 v17, -v8, v12, 1.0
v_fmac_f32_e32 v11, v16, v11
s_delay_alu instid0(TRANS32_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_fma_f32 v18, -v9, v13, 1.0
v_mul_f32_e32 v16, v14, v6
v_dual_fmac_f32 v12, v17, v12 :: v_dual_mul_f32 v17, v10, v11
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_fmac_f32_e32 v13, v18, v13
v_fma_f32 v21, -v3, v16, v14
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_mul_f32_e32 v18, v15, v12
v_fma_f32 v22, -v7, v17, v10
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
v_mul_f32_e32 v20, v19, v13
v_fma_f32 v23, -v8, v18, v15
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_fmac_f32_e32 v17, v22, v11
v_fma_f32 v24, -v9, v20, v19
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_fmac_f32_e32 v18, v23, v12
v_fma_f32 v7, -v7, v17, v10
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_fmac_f32_e32 v20, v24, v13
v_fma_f32 v8, -v8, v18, v15
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_div_fmas_f32 v7, v7, v11, v17
s_mov_b32 vcc_lo, s0
v_fma_f32 v9, -v9, v20, v19
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_div_fixup_f32 v7, v7, 0x437f0000, v4
v_fmac_f32_e32 v16, v21, v6
v_fma_f32 v3, -v3, v16, v14
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2)
v_div_fmas_f32 v3, v3, v6, v16
s_mov_b32 vcc_lo, s1
v_div_fmas_f32 v6, v8, v12, v18
s_mov_b32 vcc_lo, s2
v_div_fixup_f32 v4, v3, s3, 1.0
v_div_fmas_f32 v8, v9, v13, v20
v_cmp_neq_f32_e32 vcc_lo, 1.0, v7
v_div_fixup_f32 v5, v6, 0x437f0000, v5
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_div_fixup_f32 v8, v8, 0x437f0000, v2
v_cndmask_b32_e32 v2, 1.0, v4, vcc_lo
v_cmp_neq_f32_e32 vcc_lo, 1.0, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_cmp_neq_f32_e64 s16, v2, |v2|
v_cndmask_b32_e32 v3, 1.0, v4, vcc_lo
v_cmp_neq_f32_e32 vcc_lo, 1.0, v8
v_cmp_neq_f32_e64 s17, v3, |v3|
v_cndmask_b32_e32 v4, 1.0, v4, vcc_lo
v_cmp_neq_f32_e32 vcc_lo, 0, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_cmp_neq_f32_e64 s0, 0, v4
v_cndmask_b32_e32 v7, 1.0, v7, vcc_lo
v_cmp_neq_f32_e32 vcc_lo, 0, v3
v_frexp_mant_f32_e32 v9, v7
v_cmp_gt_f32_e64 s9, 1.0, v7
v_cmp_eq_f32_e64 s1, 0, v7
v_frexp_exp_i32_f32_e32 v10, v7
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_cmp_gt_f32_e64 s11, 0x3f2aaaab, v9
s_xor_b32 s9, s16, s9
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cndmask_b32_e64 v16, |v2|, 0, s9
v_cndmask_b32_e64 v15, 0, 1, s11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ldexp_f32 v9, v9, v15
v_add_f32_e32 v15, 1.0, v9
v_cndmask_b32_e32 v6, 1.0, v5, vcc_lo
v_cndmask_b32_e64 v5, 1.0, v8, s0
v_add_f32_e32 v22, -1.0, v9
v_cmp_gt_f32_e32 vcc_lo, 0, v3
v_add_f32_e32 v27, -1.0, v15
v_cmp_gt_f32_e64 s10, 1.0, v6
v_frexp_mant_f32_e32 v13, v5
v_frexp_mant_f32_e32 v11, v6
v_rcp_f32_e32 v20, v15
v_frexp_exp_i32_f32_e32 v12, v6
s_xor_b32 s9, s17, s10
v_sub_f32_e32 v9, v9, v27
v_cndmask_b32_e64 v18, |v3|, 0, s9
v_cmp_gt_f32_e64 s9, 0x3f2aaaab, v13
v_subrev_co_ci_u32_e64 v10, s10, 0, v10, s11
v_frexp_exp_i32_f32_e32 v14, v5
v_cmp_eq_f32_e64 s3, 0, v6
s_delay_alu instid0(VALU_DEP_4)
v_cndmask_b32_e64 v19, 0, 1, s9
v_mul_f32_e32 v26, v22, v20
v_cvt_f32_i32_e32 v10, v10
v_cmp_eq_f32_e64 s0, 0x7f800000, v7
v_cmp_eq_f32_e64 s2, 0x7f800000, v6
v_ldexp_f32 v13, v13, v19
v_mul_f32_e32 v31, v15, v26
v_cmp_gt_f32_e64 s7, 1.0, v5
v_cmp_eq_f32_e64 s5, 0, v5
v_cmp_eq_f32_e64 s4, 0x7f800000, v5
v_add_f32_e32 v19, 1.0, v13
v_cmp_gt_f32_e64 s8, 0, v2
v_add_f32_e32 v25, -1.0, v13
v_fma_f32 v15, v26, v15, -v31
v_cmp_neq_f32_e64 s12, v4, |v4|
v_rcp_f32_e32 v23, v19
s_xor_b32 s8, s8, s1
v_cmp_gt_f32_e64 s6, 0, v4
v_cndmask_b32_e64 v8, 0x7f800000, 0, s8
v_cmp_gt_f32_e64 s8, 0x3f2aaaab, v11
s_xor_b32 s7, s12, s7
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
s_xor_b32 s6, s6, s5
v_cndmask_b32_e64 v17, 0, 1, s8
v_subrev_co_ci_u32_e64 v12, s8, 0, v12, s8
v_subrev_co_ci_u32_e64 v14, s8, 0, v14, s9
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_ldexp_f32 v11, v11, v17
v_cvt_f32_i32_e32 v12, v12
s_xor_b32 s8, vcc_lo, s3
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_3)
v_cvt_f32_i32_e32 v14, v14
v_cmp_eq_f32_e32 vcc_lo, 1.0, v7
v_add_f32_e32 v17, 1.0, v11
v_add_f32_e32 v24, -1.0, v11
v_cndmask_b32_e32 v7, v16, v7, vcc_lo
v_rcp_f32_e32 v21, v17
v_dual_add_f32 v29, -1.0, v17 :: v_dual_mul_f32 v30, v25, v23
v_cmp_eq_f32_e32 vcc_lo, 1.0, v6
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_sub_f32_e32 v11, v11, v29
v_mul_f32_e32 v27, v19, v30
v_cndmask_b32_e32 v6, v18, v6, vcc_lo
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v28, v24, v21
v_add_f32_e32 v32, -1.0, v19
v_fma_f32 v19, v30, v19, -v27
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_mul_f32_e32 v33, v17, v28
v_sub_f32_e32 v13, v13, v32
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v17, v28, v17, -v33
v_fmac_f32_e32 v17, v28, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_mul_f32 v11, 0x3f317218, v12 :: v_dual_add_f32 v32, v33, v17
v_sub_f32_e32 v37, v24, v32
v_sub_f32_e32 v33, v32, v33
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_sub_f32_e32 v24, v24, v37
v_fmac_f32_e32 v19, v30, v13
v_add_f32_e32 v35, v27, v19
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_fmac_f32 v15, v26, v9 :: v_dual_sub_f32 v38, v25, v35
v_add_f32_e32 v13, v31, v15
v_mul_f32_e32 v9, 0x3f317218, v10
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_dual_sub_f32 v25, v25, v38 :: v_dual_sub_f32 v36, v22, v13
v_fma_f32 v34, v10, 0x3f317218, -v9
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_dual_sub_f32 v22, v22, v36 :: v_dual_sub_f32 v17, v33, v17
v_dual_fmac_f32 v34, 0xb102e308, v10 :: v_dual_sub_f32 v31, v13, v31
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_sub_f32 v13, v22, v13 :: v_dual_sub_f32 v22, v24, v32
v_add_f32_e32 v17, v17, v22
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_dual_add_f32 v10, v37, v17 :: v_dual_mul_f32 v29, 0x3f317218, v14
v_sub_f32_e32 v24, v25, v35
v_dual_sub_f32 v27, v35, v27 :: v_dual_mul_f32 v10, v21, v10
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v22, v14, 0x3f317218, -v29
v_fmac_f32_e32 v22, 0xb102e308, v14
v_dual_add_f32 v14, v9, v34 :: v_dual_sub_f32 v15, v31, v15
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_f32_e32 v19, v27, v19
v_add_f32_e32 v13, v15, v13
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_f32_e32 v19, v19, v24
v_fma_f32 v15, v12, 0x3f317218, -v11
v_add_f32_e32 v13, v36, v13
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v13, v20, v13
v_dual_add_f32 v20, v28, v10 :: v_dual_add_f32 v17, v26, v13
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_f32_e32 v24, v17, v26
v_sub_f32_e32 v26, v20, v28
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_dual_sub_f32 v10, v10, v26 :: v_dual_sub_f32 v13, v13, v24
v_fmac_f32_e32 v15, 0xb102e308, v12
v_dual_mul_f32 v27, v20, v20 :: v_dual_add_f32 v32, v10, v10
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_f32_e32 v26, v13, v13
v_add_f32_e32 v12, v38, v19
v_mul_f32_e32 v12, v23, v12
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v23, v30, v12
v_sub_f32_e32 v28, v23, v30
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_sub_f32_e32 v12, v12, v28
v_fma_f32 v28, v20, v20, -v27
v_fmac_f32_e32 v28, v20, v32
v_ldexp_f32 v32, v17, 1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v35, v27, v28
v_dual_mul_f32 v25, v17, v17 :: v_dual_mul_f32 v40, v20, v35
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v30, v17, v17, -v25
v_dual_fmac_f32 v30, v17, v26 :: v_dual_add_f32 v19, v11, v15
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v26, v25, v30
v_mul_f32_e32 v38, v17, v26
v_fmaak_f32 v37, s13, v26, 0x3e91f4c4
v_dual_sub_f32 v25, v26, v25 :: v_dual_mul_f32 v24, v23, v23
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_fmaak_f32 v37, v26, v37, 0x3ecccdef
v_sub_f32_e32 v25, v30, v25
v_fma_f32 v30, v26, v17, -v38
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_fma_f32 v31, v23, v23, -v24
v_mul_f32_e32 v43, v26, v37
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_fmac_f32_e32 v30, v26, v13
v_fma_f32 v26, v26, v37, -v43
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_fmac_f32 v26, v25, v37 :: v_dual_sub_f32 v27, v35, v27
v_sub_f32_e32 v27, v28, v27
v_fma_f32 v28, v35, v20, -v40
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_dual_fmac_f32 v28, v35, v10 :: v_dual_fmaak_f32 v39, s13, v35, 0x3e91f4c4
v_ldexp_f32 v10, v10, 1
v_ldexp_f32 v13, v13, 1
v_fmac_f32_e32 v28, v27, v20
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmaak_f32 v39, v35, v39, 0x3ecccdef
v_dual_add_f32 v33, v12, v12 :: v_dual_mul_f32 v44, v35, v39
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_fmac_f32_e32 v31, v23, v33
v_add_f32_e32 v21, v29, v22
v_ldexp_f32 v33, v20, 1
v_dual_add_f32 v36, v24, v31 :: v_dual_sub_f32 v9, v14, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_sub_f32_e32 v24, v36, v24
v_mul_f32_e32 v42, v23, v36
v_fmaak_f32 v41, s13, v36, 0x3e91f4c4
v_sub_f32_e32 v24, v31, v24
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v31, v36, v23, -v42
v_fmac_f32_e32 v31, v36, v12
v_ldexp_f32 v12, v12, 1
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v31, v24, v23
v_fmaak_f32 v41, v36, v41, 0x3ecccdef
v_mul_f32_e32 v45, v36, v41
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
v_fma_f32 v20, v36, v41, -v45
v_add_f32_e32 v36, v42, v31
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v20, v24, v41
v_add_f32_e32 v37, v45, v20
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_f32_e32 v45, v37, v45
v_sub_f32_e32 v20, v20, v45
v_add_f32_e32 v46, 0x3f2aaaaa, v37
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_f32_e32 v20, 0x31739010, v20
v_add_f32_e32 v24, v43, v26
v_add_f32_e32 v45, 0xbf2aaaaa, v46
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_f32_e32 v41, v24, v43
v_sub_f32_e32 v37, v37, v45
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_f32_e32 v26, v26, v41
v_add_f32_e32 v20, v20, v37
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_dual_add_f32 v26, 0x31739010, v26 :: v_dual_sub_f32 v11, v19, v11
v_fmac_f32_e32 v30, v25, v17
v_fma_f32 v17, v35, v39, -v44
v_sub_f32_e32 v11, v15, v11
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_f32_e32 v25, v38, v30
v_fmac_f32_e32 v17, v27, v39
v_add_f32_e32 v39, 0x3f2aaaaa, v24
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v41, 0xbf2aaaaa, v39
v_sub_f32_e32 v24, v24, v41
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_dual_add_f32 v35, v44, v17 :: v_dual_add_f32 v24, v26, v24
v_dual_sub_f32 v26, v25, v38 :: v_dual_add_f32 v27, v40, v28
v_dual_sub_f32 v38, v36, v42 :: v_dual_add_f32 v37, v39, v24
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
v_sub_f32_e32 v26, v30, v26
v_dual_add_f32 v30, v46, v20 :: v_dual_add_f32 v43, 0x3f2aaaaa, v35
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_dual_sub_f32 v44, v35, v44 :: v_dual_mul_f32 v41, v25, v37
v_mul_f32_e32 v45, v36, v30
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_sub_f32 v17, v17, v44 :: v_dual_add_f32 v44, 0xbf2aaaaa, v43
v_dual_sub_f32 v35, v35, v44 :: v_dual_sub_f32 v44, v46, v30
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_f32_e32 v20, v20, v44
v_fma_f32 v44, v36, v30, -v45
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_add_f32 v17, 0x31739010, v17 :: v_dual_fmac_f32 v44, v36, v20
v_add_f32_e32 v17, v17, v35
v_sub_f32_e32 v35, v27, v40
v_ldexp_f32 v20, v23, 1
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v40, v43, v17
v_sub_f32_e32 v42, v43, v40
v_mul_f32_e32 v43, v27, v40
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_f32_e32 v17, v17, v42
v_fma_f32 v42, v27, v40, -v43
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v42, v27, v17
v_sub_f32_e32 v17, v31, v38
v_dual_fmac_f32 v44, v17, v30 :: v_dual_sub_f32 v39, v39, v37
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_f32_e32 v24, v24, v39
v_fma_f32 v39, v25, v37, -v41
v_fmac_f32_e32 v39, v25, v24
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v39, v26, v37
v_add_f32_e32 v17, v41, v39
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_f32_e32 v26, v32, v17
v_sub_f32_e32 v24, v28, v35
v_sub_f32_e32 v32, v26, v32
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
v_fmac_f32_e32 v42, v24, v40
v_add_f32_e32 v24, v45, v44
v_sub_f32_e32 v25, v17, v41
v_sub_f32_e32 v17, v17, v32
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v31, v20, v24
v_sub_f32_e32 v20, v31, v20
v_sub_f32_e32 v30, v24, v45
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_f32_e32 v20, v24, v20
v_sub_f32_e32 v30, v44, v30
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v12, v12, v30
v_dual_add_f32 v12, v12, v20 :: v_dual_sub_f32 v25, v39, v25
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v13, v13, v25
v_add_f32_e32 v13, v13, v17
v_sub_f32_e32 v17, v21, v29
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_dual_add_f32 v20, v26, v13 :: v_dual_add_f32 v23, v43, v42
v_dual_sub_f32 v17, v22, v17 :: v_dual_add_f32 v22, v31, v12
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_f32_e32 v24, v14, v20
v_add_f32_e32 v28, v33, v23
v_sub_f32_e32 v27, v23, v43
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_f32_e32 v33, v28, v33
v_sub_f32_e32 v27, v42, v27
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_sub_f32 v23, v23, v33 :: v_dual_add_f32 v10, v10, v27
v_dual_add_f32 v10, v10, v23 :: v_dual_sub_f32 v23, v20, v26
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v15, v28, v10
v_add_f32_e32 v26, v19, v15
v_sub_f32_e32 v27, v22, v31
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_dual_sub_f32 v12, v12, v27 :: v_dual_sub_f32 v25, v15, v28
v_add_f32_e32 v28, v21, v22
v_sub_f32_e32 v10, v10, v25
v_sub_f32_e32 v25, v26, v19
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
v_sub_f32_e32 v30, v26, v25
v_sub_f32_e32 v15, v15, v25
v_add_f32_e32 v25, v11, v10
v_sub_f32_e32 v9, v34, v9
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_sub_f32 v19, v19, v30 :: v_dual_sub_f32 v30, v25, v11
v_add_f32_e32 v15, v15, v19
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_sub_f32_e32 v19, v25, v30
v_dual_sub_f32 v10, v10, v30 :: v_dual_sub_f32 v13, v13, v23
v_sub_f32_e32 v23, v24, v14
v_sub_f32_e32 v11, v11, v19
v_sub_f32_e32 v27, v28, v21
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_sub_f32_e32 v20, v20, v23
v_add_f32_e32 v10, v10, v11
v_sub_f32_e32 v29, v24, v23
v_add_f32_e32 v23, v9, v13
v_sub_f32_e32 v31, v28, v27
v_sub_f32_e32 v22, v22, v27
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_sub_f32_e32 v14, v14, v29
v_sub_f32_e32 v29, v23, v9
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_dual_sub_f32 v21, v21, v31 :: v_dual_add_f32 v14, v20, v14
v_sub_f32_e32 v20, v23, v29
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_dual_add_f32 v21, v22, v21 :: v_dual_add_f32 v14, v23, v14
v_add_f32_e32 v27, v17, v12
v_sub_f32_e32 v9, v9, v20
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
v_sub_f32_e32 v31, v27, v17
v_dual_add_f32 v20, v27, v21 :: v_dual_add_f32 v21, v24, v14
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_f32_e32 v22, v27, v31
v_dual_sub_f32 v12, v12, v31 :: v_dual_sub_f32 v17, v17, v22
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_f32_e32 v22, v21, v24
v_dual_add_f32 v12, v12, v17 :: v_dual_add_f32 v15, v25, v15
v_cndmask_b32_e64 v17, 0x7f800000, 0, s6
v_cmp_class_f32_e64 s6, v2, 0x204
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v23, v26, v15
v_dual_sub_f32 v24, v23, v26 :: v_dual_sub_f32 v13, v13, v29
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_f32_e32 v9, v13, v9
v_dual_sub_f32 v13, v14, v22 :: v_dual_sub_f32 v14, v15, v24
v_cndmask_b32_e64 v15, 0x7f800000, 0, s8
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_dual_add_f32 v9, v9, v13 :: v_dual_add_f32 v10, v10, v14
v_cndmask_b32_e64 v13, |v4|, 0, s7
v_add_f32_e32 v14, v23, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_sub_f32 v23, v14, v23 :: v_dual_mul_f32 v24, v3, v14
v_sub_f32_e32 v10, v10, v23
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v14, v3, v14, -v24
v_fmac_f32_e32 v14, v3, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v10, v24, v14
v_sub_f32_e32 v18, v10, v24
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_add_f32 v19, v28, v20 :: v_dual_sub_f32 v14, v14, v18
v_sub_f32_e32 v11, v19, v28
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_f32_e32 v11, v20, v11
v_dual_add_f32 v11, v12, v11 :: v_dual_add_f32 v12, v21, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_add_f32 v20, v19, v11 :: v_dual_sub_f32 v21, v12, v21
v_dual_mul_f32 v22, v2, v12 :: v_dual_sub_f32 v19, v20, v19
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_f32_e32 v9, v9, v21
v_fma_f32 v12, v2, v12, -v22
v_mul_f32_e32 v25, v4, v20
v_cmp_class_f32_e64 vcc_lo, v22, 0x204
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_dual_sub_f32 v11, v11, v19 :: v_dual_fmac_f32 v12, v2, v9
v_fma_f32 v19, v4, v20, -v25
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v9, v22, v12
v_dual_fmac_f32 v19, v4, v11 :: v_dual_sub_f32 v16, v9, v22
v_cndmask_b32_e32 v9, v9, v22, vcc_lo
v_cmp_class_f32_e64 vcc_lo, v24, 0x204
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_dual_add_f32 v11, v25, v19 :: v_dual_sub_f32 v12, v12, v16
v_cndmask_b32_e32 v10, v10, v24, vcc_lo
v_cmp_class_f32_e64 vcc_lo, v25, 0x204
v_cndmask_b32_e32 v20, v11, v25, vcc_lo
v_cmp_eq_f32_e32 vcc_lo, 0x42b17218, v9
v_sub_f32_e32 v11, v11, v25
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cndmask_b32_e64 v21, 0, 0x37000000, vcc_lo
v_cmp_eq_f32_e32 vcc_lo, 0x42b17218, v10
v_dual_sub_f32 v11, v19, v11 :: v_dual_sub_f32 v24, v9, v21
v_cndmask_b32_e64 v22, 0, 0x37000000, vcc_lo
v_cmp_eq_f32_e32 vcc_lo, 0x42b17218, v20
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_dual_mul_f32 v26, 0x3fb8aa3b, v24 :: v_dual_sub_f32 v25, v10, v22
v_cndmask_b32_e64 v23, 0, 0x37000000, vcc_lo
v_cmp_neq_f32_e64 vcc_lo, 0x7f800000, |v9|
v_fma_f32 v27, v24, 0x3fb8aa3b, -v26
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_mul_f32_e32 v18, 0x3fb8aa3b, v25
v_sub_f32_e32 v16, v20, v23
v_rndne_f32_e32 v28, v26
v_cndmask_b32_e32 v9, 0, v12, vcc_lo
v_fmac_f32_e32 v27, 0x32a5705f, v24
v_fma_f32 v29, v25, 0x3fb8aa3b, -v18
v_mul_f32_e32 v19, 0x3fb8aa3b, v16
v_rndne_f32_e32 v30, v18
v_sub_f32_e32 v26, v26, v28
v_cmp_neq_f32_e64 vcc_lo, 0x7f800000, |v10|
v_fmac_f32_e32 v29, 0x32a5705f, v25
v_fma_f32 v31, v16, 0x3fb8aa3b, -v19
v_rndne_f32_e32 v32, v19
v_sub_f32_e32 v18, v18, v30
v_add_f32_e32 v12, v26, v27
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
v_dual_cndmask_b32 v10, 0, v14 :: v_dual_fmac_f32 v31, 0x32a5705f, v16
v_dual_sub_f32 v19, v19, v32 :: v_dual_add_f32 v18, v18, v29
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_exp_f32_e32 v12, v12
v_cmp_neq_f32_e64 vcc_lo, 0x7f800000, |v20|
v_cvt_i32_f32_e32 v20, v30
v_add_f32_e32 v19, v19, v31
v_exp_f32_e32 v14, v18
v_add_f32_e32 v9, v21, v9
v_cndmask_b32_e32 v11, 0, v11, vcc_lo
v_cmp_ngt_f32_e32 vcc_lo, 0xc2ce8ed0, v24
v_exp_f32_e32 v18, v19
v_cvt_i32_f32_e32 v19, v28
v_cvt_i32_f32_e32 v21, v32
v_dual_add_f32 v10, v22, v10 :: v_dual_add_f32 v11, v23, v11
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(TRANS32_DEP_2)
v_ldexp_f32 v12, v12, v19
v_ldexp_f32 v14, v14, v20
s_waitcnt_depctr 0xfff
v_ldexp_f32 v18, v18, v21
v_cndmask_b32_e32 v12, 0, v12, vcc_lo
v_cmp_ngt_f32_e32 vcc_lo, 0xc2ce8ed0, v25
v_cndmask_b32_e32 v14, 0, v14, vcc_lo
v_cmp_ngt_f32_e32 vcc_lo, 0xc2ce8ed0, v16
v_cndmask_b32_e32 v18, 0, v18, vcc_lo
v_cmp_nlt_f32_e32 vcc_lo, 0x42b17218, v24
v_cndmask_b32_e32 v12, 0x7f800000, v12, vcc_lo
v_cmp_nlt_f32_e32 vcc_lo, 0x42b17218, v25
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_fma_f32 v9, v12, v9, v12
v_cndmask_b32_e32 v14, 0x7f800000, v14, vcc_lo
v_cmp_nlt_f32_e32 vcc_lo, 0x42b17218, v16
v_fma_f32 v10, v14, v10, v14
v_cndmask_b32_e32 v16, 0x7f800000, v18, vcc_lo
v_cmp_eq_f32_e32 vcc_lo, 1.0, v5
s_delay_alu instid0(VALU_DEP_2)
v_fma_f32 v11, v16, v11, v16
v_cndmask_b32_e32 v5, v13, v5, vcc_lo
v_cmp_eq_f32_e32 vcc_lo, 0x7f800000, v12
v_cndmask_b32_e32 v9, v9, v12, vcc_lo
v_cmp_eq_f32_e32 vcc_lo, 0x7f800000, v14
v_cndmask_b32_e32 v10, v10, v14, vcc_lo
v_cmp_eq_f32_e32 vcc_lo, 0x7f800000, v16
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_2)
v_cndmask_b32_e64 v7, |v9|, v7, s6
v_cmp_class_f32_e64 s6, v3, 0x204
v_cndmask_b32_e32 v11, v11, v16, vcc_lo
s_or_b32 vcc_lo, s1, s0
v_cndmask_b32_e64 v6, |v10|, v6, s6
v_cmp_class_f32_e64 s6, v4, 0x204
v_cndmask_b32_e32 v7, v7, v8, vcc_lo
s_or_b32 vcc_lo, s3, s2
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cndmask_b32_e32 v6, v6, v15, vcc_lo
v_cndmask_b32_e64 v5, |v11|, v5, s6
s_or_b32 vcc_lo, s5, s4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_dual_mul_f32 v7, 0x437f0000, v7 :: v_dual_mul_f32 v6, 0x437f0000, v6
v_cndmask_b32_e32 v5, v5, v17, vcc_lo
v_cmp_o_f32_e32 vcc_lo, v2, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
v_mul_f32_e32 v5, 0x437f0000, v5
v_cndmask_b32_e32 v2, 0x7fc00000, v7, vcc_lo
v_cmp_o_f32_e32 vcc_lo, v3, v3
v_cndmask_b32_e32 v3, 0x7fc00000, v6, vcc_lo
v_cmp_o_f32_e32 vcc_lo, v4, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_trunc_f32_e32 v6, v3
v_cndmask_b32_e32 v4, 0x7fc00000, v5, vcc_lo
v_trunc_f32_e32 v5, v2
v_sub_f32_e32 v9, v3, v6
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_trunc_f32_e32 v7, v4
v_sub_f32_e32 v8, v2, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_f32_e32 v10, v4, v7
v_cmp_ge_f32_e64 s0, |v8|, 0.5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cndmask_b32_e64 v8, 0, 1.0, s0
v_cmp_ge_f32_e64 s0, |v9|, 0.5
v_bfi_b32 v2, 0x7fffffff, v8, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_cndmask_b32_e64 v9, 0, 1.0, s0
v_cmp_ge_f32_e64 s0, |v10|, 0.5
v_add_f32_e32 v2, v5, v2
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_bfi_b32 v3, 0x7fffffff, v9, v3
v_cndmask_b32_e64 v10, 0, 1.0, s0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cvt_i32_f32_e32 v2, v2
v_add_f32_e32 v3, v6, v3
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_bfi_b32 v4, 0x7fffffff, v10, v4
v_cvt_f32_ubyte0_e32 v2, v2
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cvt_i32_f32_e32 v3, v3
v_add_f32_e32 v4, v7, v4
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cmp_nlt_f32_e32 vcc_lo, 0x437f0000, v2
v_cvt_f32_ubyte0_e32 v3, v3
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_cvt_i32_f32_e32 v4, v4
v_cndmask_b32_e32 v2, 0x437f0000, v2, vcc_lo
v_cmp_nlt_f32_e32 vcc_lo, 0x437f0000, v3
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cvt_f32_ubyte0_e32 v4, v4
v_cvt_i32_f32_e32 v2, v2
v_cndmask_b32_e32 v3, 0x437f0000, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_nlt_f32_e32 vcc_lo, 0x437f0000, v4
v_cvt_i32_f32_e32 v3, v3
v_cndmask_b32_e32 v4, 0x437f0000, v4, vcc_lo
v_add_co_u32 v0, vcc_lo, s14, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s15, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_3)
v_cvt_i32_f32_e32 v4, v4
s_clause 0x2
global_store_b8 v[0:1], v2, off
global_store_b8 v[0:1], v3, off offset:1
global_store_b8 v[0:1], v4, off offset:2
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z11GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 47
.amdhsa_next_free_sgpr 18
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z11GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif, .Lfunc_end0-_Z11GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z11GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif
.private_segment_fixed_size: 0
.sgpr_count: 20
.sgpr_spill_count: 0
.symbol: _Z11GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 47
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#define ThreadsPerBlock 512
__global__
void GammaKernel(const uchar4* const rgbaImage, uchar4* const outputImage, int numRows, int numCols, float gamma)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < numRows * numCols) {
uchar4 px = rgbaImage[i];
unsigned char rcolor = round(pow((px.x / 255.0f), (1.0f / gamma)) * 255.0f);
outputImage[i].x = (rcolor > 255.0f) ? 255.0f : rcolor;
unsigned char gcolor = round(pow((px.y / 255.0f), (1.0f / gamma)) * 255.0f);
outputImage[i].y = (gcolor > 255.0f) ? 255.0f : gcolor;
unsigned char bcolor = round(pow((px.z / 255.0f), (1.0f / gamma)) * 255.0f);
outputImage[i].z = (bcolor > 255.0f) ? 255.0f : bcolor;
}
}
void CorreccionGamma(uchar4* const d_rgbaImage, uchar4* const d_outputImage, size_t numRows, size_t numCols, float gamma)
{
long long int total_px = numRows * numCols;
long int grids_n = ceil(total_px / ThreadsPerBlock);
const dim3 blockSize(ThreadsPerBlock, 1, 1);
const dim3 gridSize(grids_n, 1, 1);
GammaKernel <<<gridSize, blockSize >>> (d_rgbaImage, d_outputImage, numRows, numCols, gamma);
hipDeviceSynchronize();
} | .text
.file "CorrecionGamma.hip"
.globl _Z26__device_stub__GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif # -- Begin function _Z26__device_stub__GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif
.p2align 4, 0x90
.type _Z26__device_stub__GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif,@function
_Z26__device_stub__GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif: # @_Z26__device_stub__GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movss %xmm0, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z11GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z26__device_stub__GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif, .Lfunc_end0-_Z26__device_stub__GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif
.cfi_endproc
# -- End function
.globl _Z15CorreccionGammaP15HIP_vector_typeIhLj4EES1_mmf # -- Begin function _Z15CorreccionGammaP15HIP_vector_typeIhLj4EES1_mmf
.p2align 4, 0x90
.type _Z15CorreccionGammaP15HIP_vector_typeIhLj4EES1_mmf,@function
_Z15CorreccionGammaP15HIP_vector_typeIhLj4EES1_mmf: # @_Z15CorreccionGammaP15HIP_vector_typeIhLj4EES1_mmf
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $120, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movss %xmm0, (%rsp) # 4-byte Spill
movq %rcx, %rbx
movq %rdx, %r14
movq %rsi, %r15
movq %rdi, %r12
movq %rcx, %rax
imulq %rdx, %rax
leaq 511(%rax), %rcx
testq %rax, %rax
cmovnsq %rax, %rcx
sarq $9, %rcx
xorps %xmm0, %xmm0
cvtsi2sd %rcx, %xmm0
cvttsd2si %xmm0, %rax
movl %eax, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $512, %rdx # imm = 0x200
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq %r12, 72(%rsp)
movq %r15, 64(%rsp)
movl %r14d, 12(%rsp)
movl %ebx, 8(%rsp)
movss (%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm0, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z11GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
callq hipDeviceSynchronize
addq $120, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z15CorreccionGammaP15HIP_vector_typeIhLj4EES1_mmf, .Lfunc_end1-_Z15CorreccionGammaP15HIP_vector_typeIhLj4EES1_mmf
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z11GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif,@object # @_Z11GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif
.section .rodata,"a",@progbits
.globl _Z11GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif
.p2align 3, 0x0
_Z11GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif:
.quad _Z26__device_stub__GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif
.size _Z11GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z11GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif"
.size .L__unnamed_1, 49
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z26__device_stub__GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z11GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000e991c_00000000-6_CorrecionGamma.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z44__device_stub__Z11GammaKernelPK6uchar4PS_iifPK6uchar4PS_iif
.type _Z44__device_stub__Z11GammaKernelPK6uchar4PS_iifPK6uchar4PS_iif, @function
_Z44__device_stub__Z11GammaKernelPK6uchar4PS_iifPK6uchar4PS_iif:
.LFB2082:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movss %xmm0, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z11GammaKernelPK6uchar4PS_iif(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z44__device_stub__Z11GammaKernelPK6uchar4PS_iifPK6uchar4PS_iif, .-_Z44__device_stub__Z11GammaKernelPK6uchar4PS_iifPK6uchar4PS_iif
.globl _Z11GammaKernelPK6uchar4PS_iif
.type _Z11GammaKernelPK6uchar4PS_iif, @function
_Z11GammaKernelPK6uchar4PS_iif:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z44__device_stub__Z11GammaKernelPK6uchar4PS_iifPK6uchar4PS_iif
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z11GammaKernelPK6uchar4PS_iif, .-_Z11GammaKernelPK6uchar4PS_iif
.globl _Z15CorreccionGammaP6uchar4S0_mmf
.type _Z15CorreccionGammaP6uchar4S0_mmf, @function
_Z15CorreccionGammaP6uchar4S0_mmf:
.LFB2057:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $56, %rsp
.cfi_def_cfa_offset 96
movq %rdi, %r12
movq %rsi, %r13
movq %rdx, %rbx
movq %rcx, %rbp
movss %xmm0, 12(%rsp)
movl $512, 24(%rsp)
movl $1, 28(%rsp)
imulq %rcx, %rdx
leaq 511(%rdx), %rax
testq %rdx, %rdx
cmovns %rdx, %rax
sarq $9, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
cvttsd2siq %xmm0, %rax
movl %eax, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 24(%rsp), %rdx
movl $1, %ecx
movq 36(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L12:
call cudaDeviceSynchronize@PLT
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
movss 12(%rsp), %xmm0
movl %ebp, %ecx
movl %ebx, %edx
movq %r13, %rsi
movq %r12, %rdi
call _Z44__device_stub__Z11GammaKernelPK6uchar4PS_iifPK6uchar4PS_iif
jmp .L12
.cfi_endproc
.LFE2057:
.size _Z15CorreccionGammaP6uchar4S0_mmf, .-_Z15CorreccionGammaP6uchar4S0_mmf
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z11GammaKernelPK6uchar4PS_iif"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z11GammaKernelPK6uchar4PS_iif(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "CorrecionGamma.hip"
.globl _Z26__device_stub__GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif # -- Begin function _Z26__device_stub__GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif
.p2align 4, 0x90
.type _Z26__device_stub__GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif,@function
_Z26__device_stub__GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif: # @_Z26__device_stub__GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movss %xmm0, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z11GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z26__device_stub__GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif, .Lfunc_end0-_Z26__device_stub__GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif
.cfi_endproc
# -- End function
.globl _Z15CorreccionGammaP15HIP_vector_typeIhLj4EES1_mmf # -- Begin function _Z15CorreccionGammaP15HIP_vector_typeIhLj4EES1_mmf
.p2align 4, 0x90
.type _Z15CorreccionGammaP15HIP_vector_typeIhLj4EES1_mmf,@function
_Z15CorreccionGammaP15HIP_vector_typeIhLj4EES1_mmf: # @_Z15CorreccionGammaP15HIP_vector_typeIhLj4EES1_mmf
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $120, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movss %xmm0, (%rsp) # 4-byte Spill
movq %rcx, %rbx
movq %rdx, %r14
movq %rsi, %r15
movq %rdi, %r12
movq %rcx, %rax
imulq %rdx, %rax
leaq 511(%rax), %rcx
testq %rax, %rax
cmovnsq %rax, %rcx
sarq $9, %rcx
xorps %xmm0, %xmm0
cvtsi2sd %rcx, %xmm0
cvttsd2si %xmm0, %rax
movl %eax, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $512, %rdx # imm = 0x200
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq %r12, 72(%rsp)
movq %r15, 64(%rsp)
movl %r14d, 12(%rsp)
movl %ebx, 8(%rsp)
movss (%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm0, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z11GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
callq hipDeviceSynchronize
addq $120, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z15CorreccionGammaP15HIP_vector_typeIhLj4EES1_mmf, .Lfunc_end1-_Z15CorreccionGammaP15HIP_vector_typeIhLj4EES1_mmf
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z11GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif,@object # @_Z11GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif
.section .rodata,"a",@progbits
.globl _Z11GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif
.p2align 3, 0x0
_Z11GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif:
.quad _Z26__device_stub__GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif
.size _Z11GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z11GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif"
.size .L__unnamed_1, 49
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z26__device_stub__GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z11GammaKernelPK15HIP_vector_typeIhLj4EEPS0_iif
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <cuda.h>
#include <stdio.h>
int main(int argc, char** argv) {
struct cudaDeviceProp p;
int device;
cudaGetDevice(&device);
cudaGetDeviceProperties(&p, device);
printf("> %s\n"
"\ttotalGlobalMem: %u B\n"
"\tsharedMemPerBlock: %u B\n" "\tregsPerBlock: %d\n"
"\twarpSize: %d threads\n"
"\tmemPitch: %u B\n"
"\tmaxThreadsPerBlock: %d\n"
"\tmaxThreadsDim: (%d, %d, %d)\n"
"\tmaxGridSize: (%d, %d, %d)\n"
"\tclockRate: %d kHz\n"
"\ttotalConstMem: %u B\n"
"\tCompute Capability: %d.%d\n"
"\ttextureAlignment: %u\n"
"\tdeviceOverlap: %d\n"
"\tmultiProcessorCount: %d\n"
"\tkernelExecTimeoutEnabled: %d\n"
"\tintegrated: %d\n"
"\tcanMapHostMemory: %d\n"
"\tcomputeMode: %d\n"
"\tmaxTexture1D: %d\n"
"\tmaxTexture2D: (%d, %d)\n"
"\tmaxTexture3D: (%d, %d, %d)\n"
"\tmaxTexture1DLayered: (%d, %d)\n"
"\tmaxTexture2DLayered: (%d, %d, %d)\n"
"\tsurfaceAlignment: %u\n"
"\tconcurrentKernels: %d\n"
"\tECCEnabled: %d\n"
"\tPCI Bus ID: %d:%d.%d\n"
"\ttccDriver: %d\n"
"\tasyncEngineCount: %d\n"
"\tunifiedAddressing: %d\n"
"\tmemoryClockRate: %d kHz\n"
"\tmemoryBusWidth: %d bits\n"
"\tl2CacheSize: %d B\n"
"\tmaxThreadsPerMultiProcessor: %d\n",
p.name, p.totalGlobalMem, p.sharedMemPerBlock, p.regsPerBlock,
p.warpSize, p.memPitch, p.maxThreadsPerBlock, p.maxThreadsDim[0],
p.maxThreadsDim[1], p.maxThreadsDim[2], p.maxGridSize[0],
p.maxGridSize[1], p.maxGridSize[2], p.clockRate, p.totalConstMem,
p.major, p.minor, p.textureAlignment, p.deviceOverlap,
p.multiProcessorCount, p.kernelExecTimeoutEnabled, p.integrated,
p.canMapHostMemory, p.computeMode, p.maxTexture1D,
p.maxTexture2D[0], p.maxTexture2D[1], p.maxTexture3D[0],
p.maxTexture3D[1], p.maxTexture3D[2], p.maxTexture1DLayered[0],
p.maxTexture1DLayered[1], p.maxTexture2DLayered[0],
p.maxTexture2DLayered[1], p.maxTexture2DLayered[2],
p.surfaceAlignment, p.concurrentKernels, p.ECCEnabled, p.pciBusID,
p.pciDeviceID, p.pciDomainID, p.tccDriver, p.asyncEngineCount,
p.unifiedAddressing, p.memoryClockRate, p.memoryBusWidth,
p.l2CacheSize, p.maxThreadsPerMultiProcessor);
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cuda.h>
#include <stdio.h>
int main(int argc, char** argv) {
struct cudaDeviceProp p;
int device;
cudaGetDevice(&device);
cudaGetDeviceProperties(&p, device);
printf("> %s\n"
"\ttotalGlobalMem: %u B\n"
"\tsharedMemPerBlock: %u B\n" "\tregsPerBlock: %d\n"
"\twarpSize: %d threads\n"
"\tmemPitch: %u B\n"
"\tmaxThreadsPerBlock: %d\n"
"\tmaxThreadsDim: (%d, %d, %d)\n"
"\tmaxGridSize: (%d, %d, %d)\n"
"\tclockRate: %d kHz\n"
"\ttotalConstMem: %u B\n"
"\tCompute Capability: %d.%d\n"
"\ttextureAlignment: %u\n"
"\tdeviceOverlap: %d\n"
"\tmultiProcessorCount: %d\n"
"\tkernelExecTimeoutEnabled: %d\n"
"\tintegrated: %d\n"
"\tcanMapHostMemory: %d\n"
"\tcomputeMode: %d\n"
"\tmaxTexture1D: %d\n"
"\tmaxTexture2D: (%d, %d)\n"
"\tmaxTexture3D: (%d, %d, %d)\n"
"\tmaxTexture1DLayered: (%d, %d)\n"
"\tmaxTexture2DLayered: (%d, %d, %d)\n"
"\tsurfaceAlignment: %u\n"
"\tconcurrentKernels: %d\n"
"\tECCEnabled: %d\n"
"\tPCI Bus ID: %d:%d.%d\n"
"\ttccDriver: %d\n"
"\tasyncEngineCount: %d\n"
"\tunifiedAddressing: %d\n"
"\tmemoryClockRate: %d kHz\n"
"\tmemoryBusWidth: %d bits\n"
"\tl2CacheSize: %d B\n"
"\tmaxThreadsPerMultiProcessor: %d\n",
p.name, p.totalGlobalMem, p.sharedMemPerBlock, p.regsPerBlock,
p.warpSize, p.memPitch, p.maxThreadsPerBlock, p.maxThreadsDim[0],
p.maxThreadsDim[1], p.maxThreadsDim[2], p.maxGridSize[0],
p.maxGridSize[1], p.maxGridSize[2], p.clockRate, p.totalConstMem,
p.major, p.minor, p.textureAlignment, p.deviceOverlap,
p.multiProcessorCount, p.kernelExecTimeoutEnabled, p.integrated,
p.canMapHostMemory, p.computeMode, p.maxTexture1D,
p.maxTexture2D[0], p.maxTexture2D[1], p.maxTexture3D[0],
p.maxTexture3D[1], p.maxTexture3D[2], p.maxTexture1DLayered[0],
p.maxTexture1DLayered[1], p.maxTexture2DLayered[0],
p.maxTexture2DLayered[1], p.maxTexture2DLayered[2],
p.surfaceAlignment, p.concurrentKernels, p.ECCEnabled, p.pciBusID,
p.pciDeviceID, p.pciDomainID, p.tccDriver, p.asyncEngineCount,
p.unifiedAddressing, p.memoryClockRate, p.memoryBusWidth,
p.l2CacheSize, p.maxThreadsPerMultiProcessor);
} | .file "tmpxft_001649d6_00000000-6_getDeviceProperties.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.ascii "> %s\n\ttotalGlobalMem: %u B\n\tsharedMemPerBlock: %u B\n\tr"
.ascii "egsPerBlock: %d\n\twarpSize: %d threads\n\tmemPitch: %u B\n\t"
.ascii "maxThreadsPerBlock: %d\n\tmaxThreadsDim: (%d, %d, %d)\n\tmax"
.ascii "GridSize: (%d, %d, %d)\n\tclockRate: %d kHz\n\ttotalConstMem"
.ascii ": %u B\n\tCompute Capability: %d.%d\n\ttextureAlignment: %u\n"
.ascii "\tdeviceOverlap: %d\n\tmultiProcessorCount: %d\n\tkernelExec"
.ascii "TimeoutEnabled: %d\n\tintegrated: %d\n\tcanMapHostMemory: %d"
.ascii "\n\tcomputeMode: %d\n\tmaxTexture1D: %d\n\tmaxTexture2D: (%d"
.ascii ", %d)\n\tmaxTexture3D: (%d, %d, %d)\n\tmaxTexture1DLayered: "
.ascii "(%d, %d)\n\tmaxTexture2DLayered: (%"
.string "d, %d, %d)\n\tsurfaceAlignment: %u\n\tconcurrentKernels: %d\n\tECCEnabled: %d\n\tPCI Bus ID: %d:%d.%d\n\ttccDriver: %d\n\tasyncEngineCount: %d\n\tunifiedAddressing: %d\n\tmemoryClockRate: %d kHz\n\tmemoryBusWidth: %d bits\n\tl2CacheSize: %d B\n\tmaxThreadsPerMultiProcessor: %d\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $1056, %rsp
.cfi_def_cfa_offset 1072
movq %fs:40, %rax
movq %rax, 1048(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rdi
call cudaGetDevice@PLT
leaq 16(%rsp), %rbx
movl 12(%rsp), %esi
movq %rbx, %rdi
call cudaGetDeviceProperties_v2@PLT
movl 640(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1080
movl 640(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1088
movl 644(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1096
movl 648(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1104
movl 652(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1112
movl 656(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1120
movl 660(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1128
movl 664(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1136
movl 668(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1144
movl 672(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1152
movl 676(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1160
movl 680(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1168
pushq 680(%rsp)
.cfi_def_cfa_offset 1176
movl 620(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1184
movl 624(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1192
movl 628(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1200
movl 632(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1208
movl 636(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1216
movl 624(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1224
movl 628(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1232
movl 632(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1240
movl 608(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1248
movl 612(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1256
movl 608(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1264
movl 612(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1272
movl 616(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1280
movl 620(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1288
movl 624(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1296
movl 628(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1304
movl 632(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1312
pushq 624(%rsp)
.cfi_def_cfa_offset 1320
movl 628(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1328
movl 632(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1336
pushq 632(%rsp)
.cfi_def_cfa_offset 1344
movl 636(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1352
movl 640(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1360
movl 644(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1368
movl 648(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1376
movl 652(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1384
movl 656(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1392
movl 660(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1400
movl 664(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1408
pushq 664(%rsp)
.cfi_def_cfa_offset 1416
movl 668(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1424
movl 672(%rsp), %r9d
movq 664(%rsp), %r8
movq 656(%rsp), %rcx
movq %rbx, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $352, %rsp
.cfi_def_cfa_offset 1072
movq 1048(%rsp), %rax
subq %fs:40, %rax
jne .L6
movl $0, %eax
addq $1056, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cuda.h>
#include <stdio.h>
int main(int argc, char** argv) {
struct cudaDeviceProp p;
int device;
cudaGetDevice(&device);
cudaGetDeviceProperties(&p, device);
printf("> %s\n"
"\ttotalGlobalMem: %u B\n"
"\tsharedMemPerBlock: %u B\n" "\tregsPerBlock: %d\n"
"\twarpSize: %d threads\n"
"\tmemPitch: %u B\n"
"\tmaxThreadsPerBlock: %d\n"
"\tmaxThreadsDim: (%d, %d, %d)\n"
"\tmaxGridSize: (%d, %d, %d)\n"
"\tclockRate: %d kHz\n"
"\ttotalConstMem: %u B\n"
"\tCompute Capability: %d.%d\n"
"\ttextureAlignment: %u\n"
"\tdeviceOverlap: %d\n"
"\tmultiProcessorCount: %d\n"
"\tkernelExecTimeoutEnabled: %d\n"
"\tintegrated: %d\n"
"\tcanMapHostMemory: %d\n"
"\tcomputeMode: %d\n"
"\tmaxTexture1D: %d\n"
"\tmaxTexture2D: (%d, %d)\n"
"\tmaxTexture3D: (%d, %d, %d)\n"
"\tmaxTexture1DLayered: (%d, %d)\n"
"\tmaxTexture2DLayered: (%d, %d, %d)\n"
"\tsurfaceAlignment: %u\n"
"\tconcurrentKernels: %d\n"
"\tECCEnabled: %d\n"
"\tPCI Bus ID: %d:%d.%d\n"
"\ttccDriver: %d\n"
"\tasyncEngineCount: %d\n"
"\tunifiedAddressing: %d\n"
"\tmemoryClockRate: %d kHz\n"
"\tmemoryBusWidth: %d bits\n"
"\tl2CacheSize: %d B\n"
"\tmaxThreadsPerMultiProcessor: %d\n",
p.name, p.totalGlobalMem, p.sharedMemPerBlock, p.regsPerBlock,
p.warpSize, p.memPitch, p.maxThreadsPerBlock, p.maxThreadsDim[0],
p.maxThreadsDim[1], p.maxThreadsDim[2], p.maxGridSize[0],
p.maxGridSize[1], p.maxGridSize[2], p.clockRate, p.totalConstMem,
p.major, p.minor, p.textureAlignment, p.deviceOverlap,
p.multiProcessorCount, p.kernelExecTimeoutEnabled, p.integrated,
p.canMapHostMemory, p.computeMode, p.maxTexture1D,
p.maxTexture2D[0], p.maxTexture2D[1], p.maxTexture3D[0],
p.maxTexture3D[1], p.maxTexture3D[2], p.maxTexture1DLayered[0],
p.maxTexture1DLayered[1], p.maxTexture2DLayered[0],
p.maxTexture2DLayered[1], p.maxTexture2DLayered[2],
p.surfaceAlignment, p.concurrentKernels, p.ECCEnabled, p.pciBusID,
p.pciDeviceID, p.pciDomainID, p.tccDriver, p.asyncEngineCount,
p.unifiedAddressing, p.memoryClockRate, p.memoryBusWidth,
p.l2CacheSize, p.maxThreadsPerMultiProcessor);
} | #include <hip/hip_runtime.h>
#include <stdio.h>
int main(int argc, char** argv) {
struct hipDeviceProp_t p;
int device;
hipGetDevice(&device);
hipGetDeviceProperties(&p, device);
printf("> %s\n"
"\ttotalGlobalMem: %u B\n"
"\tsharedMemPerBlock: %u B\n" "\tregsPerBlock: %d\n"
"\twarpSize: %d threads\n"
"\tmemPitch: %u B\n"
"\tmaxThreadsPerBlock: %d\n"
"\tmaxThreadsDim: (%d, %d, %d)\n"
"\tmaxGridSize: (%d, %d, %d)\n"
"\tclockRate: %d kHz\n"
"\ttotalConstMem: %u B\n"
"\tCompute Capability: %d.%d\n"
"\ttextureAlignment: %u\n"
"\tdeviceOverlap: %d\n"
"\tmultiProcessorCount: %d\n"
"\tkernelExecTimeoutEnabled: %d\n"
"\tintegrated: %d\n"
"\tcanMapHostMemory: %d\n"
"\tcomputeMode: %d\n"
"\tmaxTexture1D: %d\n"
"\tmaxTexture2D: (%d, %d)\n"
"\tmaxTexture3D: (%d, %d, %d)\n"
"\tmaxTexture1DLayered: (%d, %d)\n"
"\tmaxTexture2DLayered: (%d, %d, %d)\n"
"\tsurfaceAlignment: %u\n"
"\tconcurrentKernels: %d\n"
"\tECCEnabled: %d\n"
"\tPCI Bus ID: %d:%d.%d\n"
"\ttccDriver: %d\n"
"\tasyncEngineCount: %d\n"
"\tunifiedAddressing: %d\n"
"\tmemoryClockRate: %d kHz\n"
"\tmemoryBusWidth: %d bits\n"
"\tl2CacheSize: %d B\n"
"\tmaxThreadsPerMultiProcessor: %d\n",
p.name, p.totalGlobalMem, p.sharedMemPerBlock, p.regsPerBlock,
p.warpSize, p.memPitch, p.maxThreadsPerBlock, p.maxThreadsDim[0],
p.maxThreadsDim[1], p.maxThreadsDim[2], p.maxGridSize[0],
p.maxGridSize[1], p.maxGridSize[2], p.clockRate, p.totalConstMem,
p.major, p.minor, p.textureAlignment, p.deviceOverlap,
p.multiProcessorCount, p.kernelExecTimeoutEnabled, p.integrated,
p.canMapHostMemory, p.computeMode, p.maxTexture1D,
p.maxTexture2D[0], p.maxTexture2D[1], p.maxTexture3D[0],
p.maxTexture3D[1], p.maxTexture3D[2], p.maxTexture1DLayered[0],
p.maxTexture1DLayered[1], p.maxTexture2DLayered[0],
p.maxTexture2DLayered[1], p.maxTexture2DLayered[2],
p.surfaceAlignment, p.concurrentKernels, p.ECCEnabled, p.pciBusID,
p.pciDeviceID, p.pciDomainID, p.tccDriver, p.asyncEngineCount,
p.unifiedAddressing, p.memoryClockRate, p.memoryBusWidth,
p.l2CacheSize, p.maxThreadsPerMultiProcessor);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
int main(int argc, char** argv) {
struct hipDeviceProp_t p;
int device;
hipGetDevice(&device);
hipGetDeviceProperties(&p, device);
printf("> %s\n"
"\ttotalGlobalMem: %u B\n"
"\tsharedMemPerBlock: %u B\n" "\tregsPerBlock: %d\n"
"\twarpSize: %d threads\n"
"\tmemPitch: %u B\n"
"\tmaxThreadsPerBlock: %d\n"
"\tmaxThreadsDim: (%d, %d, %d)\n"
"\tmaxGridSize: (%d, %d, %d)\n"
"\tclockRate: %d kHz\n"
"\ttotalConstMem: %u B\n"
"\tCompute Capability: %d.%d\n"
"\ttextureAlignment: %u\n"
"\tdeviceOverlap: %d\n"
"\tmultiProcessorCount: %d\n"
"\tkernelExecTimeoutEnabled: %d\n"
"\tintegrated: %d\n"
"\tcanMapHostMemory: %d\n"
"\tcomputeMode: %d\n"
"\tmaxTexture1D: %d\n"
"\tmaxTexture2D: (%d, %d)\n"
"\tmaxTexture3D: (%d, %d, %d)\n"
"\tmaxTexture1DLayered: (%d, %d)\n"
"\tmaxTexture2DLayered: (%d, %d, %d)\n"
"\tsurfaceAlignment: %u\n"
"\tconcurrentKernels: %d\n"
"\tECCEnabled: %d\n"
"\tPCI Bus ID: %d:%d.%d\n"
"\ttccDriver: %d\n"
"\tasyncEngineCount: %d\n"
"\tunifiedAddressing: %d\n"
"\tmemoryClockRate: %d kHz\n"
"\tmemoryBusWidth: %d bits\n"
"\tl2CacheSize: %d B\n"
"\tmaxThreadsPerMultiProcessor: %d\n",
p.name, p.totalGlobalMem, p.sharedMemPerBlock, p.regsPerBlock,
p.warpSize, p.memPitch, p.maxThreadsPerBlock, p.maxThreadsDim[0],
p.maxThreadsDim[1], p.maxThreadsDim[2], p.maxGridSize[0],
p.maxGridSize[1], p.maxGridSize[2], p.clockRate, p.totalConstMem,
p.major, p.minor, p.textureAlignment, p.deviceOverlap,
p.multiProcessorCount, p.kernelExecTimeoutEnabled, p.integrated,
p.canMapHostMemory, p.computeMode, p.maxTexture1D,
p.maxTexture2D[0], p.maxTexture2D[1], p.maxTexture3D[0],
p.maxTexture3D[1], p.maxTexture3D[2], p.maxTexture1DLayered[0],
p.maxTexture1DLayered[1], p.maxTexture2DLayered[0],
p.maxTexture2DLayered[1], p.maxTexture2DLayered[2],
p.surfaceAlignment, p.concurrentKernels, p.ECCEnabled, p.pciBusID,
p.pciDeviceID, p.pciDomainID, p.tccDriver, p.asyncEngineCount,
p.unifiedAddressing, p.memoryClockRate, p.memoryBusWidth,
p.l2CacheSize, p.maxThreadsPerMultiProcessor);
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
int main(int argc, char** argv) {
struct hipDeviceProp_t p;
int device;
hipGetDevice(&device);
hipGetDeviceProperties(&p, device);
printf("> %s\n"
"\ttotalGlobalMem: %u B\n"
"\tsharedMemPerBlock: %u B\n" "\tregsPerBlock: %d\n"
"\twarpSize: %d threads\n"
"\tmemPitch: %u B\n"
"\tmaxThreadsPerBlock: %d\n"
"\tmaxThreadsDim: (%d, %d, %d)\n"
"\tmaxGridSize: (%d, %d, %d)\n"
"\tclockRate: %d kHz\n"
"\ttotalConstMem: %u B\n"
"\tCompute Capability: %d.%d\n"
"\ttextureAlignment: %u\n"
"\tdeviceOverlap: %d\n"
"\tmultiProcessorCount: %d\n"
"\tkernelExecTimeoutEnabled: %d\n"
"\tintegrated: %d\n"
"\tcanMapHostMemory: %d\n"
"\tcomputeMode: %d\n"
"\tmaxTexture1D: %d\n"
"\tmaxTexture2D: (%d, %d)\n"
"\tmaxTexture3D: (%d, %d, %d)\n"
"\tmaxTexture1DLayered: (%d, %d)\n"
"\tmaxTexture2DLayered: (%d, %d, %d)\n"
"\tsurfaceAlignment: %u\n"
"\tconcurrentKernels: %d\n"
"\tECCEnabled: %d\n"
"\tPCI Bus ID: %d:%d.%d\n"
"\ttccDriver: %d\n"
"\tasyncEngineCount: %d\n"
"\tunifiedAddressing: %d\n"
"\tmemoryClockRate: %d kHz\n"
"\tmemoryBusWidth: %d bits\n"
"\tl2CacheSize: %d B\n"
"\tmaxThreadsPerMultiProcessor: %d\n",
p.name, p.totalGlobalMem, p.sharedMemPerBlock, p.regsPerBlock,
p.warpSize, p.memPitch, p.maxThreadsPerBlock, p.maxThreadsDim[0],
p.maxThreadsDim[1], p.maxThreadsDim[2], p.maxGridSize[0],
p.maxGridSize[1], p.maxGridSize[2], p.clockRate, p.totalConstMem,
p.major, p.minor, p.textureAlignment, p.deviceOverlap,
p.multiProcessorCount, p.kernelExecTimeoutEnabled, p.integrated,
p.canMapHostMemory, p.computeMode, p.maxTexture1D,
p.maxTexture2D[0], p.maxTexture2D[1], p.maxTexture3D[0],
p.maxTexture3D[1], p.maxTexture3D[2], p.maxTexture1DLayered[0],
p.maxTexture1DLayered[1], p.maxTexture2DLayered[0],
p.maxTexture2DLayered[1], p.maxTexture2DLayered[2],
p.surfaceAlignment, p.concurrentKernels, p.ECCEnabled, p.pciBusID,
p.pciDeviceID, p.pciDomainID, p.tccDriver, p.asyncEngineCount,
p.unifiedAddressing, p.memoryClockRate, p.memoryBusWidth,
p.l2CacheSize, p.maxThreadsPerMultiProcessor);
} | .text
.file "getDeviceProperties.hip"
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $1736, %rsp # imm = 0x6C8
.cfi_def_cfa_offset 1792
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
leaq 12(%rsp), %rdi
callq hipGetDevice
movl 12(%rsp), %esi
leaq 264(%rsp), %rdi
callq hipGetDevicePropertiesR0600
movq 552(%rsp), %rdx
movq 560(%rsp), %rcx
movl 568(%rsp), %r8d
movl 572(%rsp), %r9d
movl 584(%rsp), %eax
movq %rax, 256(%rsp) # 8-byte Spill
movl 588(%rsp), %eax
movq %rax, 248(%rsp) # 8-byte Spill
movl 592(%rsp), %eax
movq %rax, 240(%rsp) # 8-byte Spill
movl 596(%rsp), %eax
movq %rax, 232(%rsp) # 8-byte Spill
movl 600(%rsp), %eax
movq %rax, 224(%rsp) # 8-byte Spill
movl 604(%rsp), %eax
movq %rax, 216(%rsp) # 8-byte Spill
movl 608(%rsp), %eax
movq %rax, 208(%rsp) # 8-byte Spill
movl 612(%rsp), %eax
movq %rax, 200(%rsp) # 8-byte Spill
movl 624(%rsp), %eax
movq %rax, 192(%rsp) # 8-byte Spill
movl 628(%rsp), %eax
movq %rax, 184(%rsp) # 8-byte Spill
movl 648(%rsp), %eax
movq %rax, 176(%rsp) # 8-byte Spill
movl 652(%rsp), %eax
movq %rax, 168(%rsp) # 8-byte Spill
movl 656(%rsp), %eax
movq %rax, 160(%rsp) # 8-byte Spill
movl 660(%rsp), %eax
movq %rax, 152(%rsp) # 8-byte Spill
movl 664(%rsp), %eax
movq %rax, 144(%rsp) # 8-byte Spill
movl 668(%rsp), %eax
movq %rax, 136(%rsp) # 8-byte Spill
movl 672(%rsp), %eax
movq %rax, 128(%rsp) # 8-byte Spill
movl 684(%rsp), %eax
movq %rax, 120(%rsp) # 8-byte Spill
movl 688(%rsp), %eax
movq %rax, 112(%rsp) # 8-byte Spill
movl 720(%rsp), %eax
movq %rax, 104(%rsp) # 8-byte Spill
movl 724(%rsp), %eax
movq %rax, 96(%rsp) # 8-byte Spill
movl 728(%rsp), %eax
movq %rax, 88(%rsp) # 8-byte Spill
movl 748(%rsp), %eax
movq %rax, 80(%rsp) # 8-byte Spill
movl 752(%rsp), %eax
movq %rax, 72(%rsp) # 8-byte Spill
movl 756(%rsp), %eax
movq %rax, 64(%rsp) # 8-byte Spill
movl 760(%rsp), %eax
movq %rax, 56(%rsp) # 8-byte Spill
movl 764(%rsp), %eax
movq %rax, 48(%rsp) # 8-byte Spill
movl 840(%rsp), %eax
movq %rax, 40(%rsp) # 8-byte Spill
movl 844(%rsp), %eax
movq %rax, 32(%rsp) # 8-byte Spill
movl 848(%rsp), %eax
movq %rax, 24(%rsp) # 8-byte Spill
movl 852(%rsp), %eax
movq %rax, 16(%rsp) # 8-byte Spill
movl 856(%rsp), %r14d
movl 860(%rsp), %r15d
movl 864(%rsp), %r12d
movl 868(%rsp), %r13d
movl 872(%rsp), %ebp
movl 876(%rsp), %r10d
movl 880(%rsp), %r11d
movl 888(%rsp), %ebx
subq $8, %rsp
.cfi_adjust_cfa_offset 8
movl $.L.str, %edi
leaq 272(%rsp), %rsi
xorl %eax, %eax
pushq %rbx
.cfi_adjust_cfa_offset 8
pushq %r11
.cfi_adjust_cfa_offset 8
pushq %r10
.cfi_adjust_cfa_offset 8
pushq %rbp
.cfi_adjust_cfa_offset 8
pushq %r13
.cfi_adjust_cfa_offset 8
pushq %r12
.cfi_adjust_cfa_offset 8
pushq %r15
.cfi_adjust_cfa_offset 8
pushq %r14
.cfi_adjust_cfa_offset 8
pushq 88(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 104(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 120(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 136(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 936(%rsp)
.cfi_adjust_cfa_offset 8
pushq 160(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 176(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 192(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 208(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 224(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 240(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 256(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 272(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 288(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 304(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 320(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 336(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 352(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 368(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 384(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 400(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 416(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 880(%rsp)
.cfi_adjust_cfa_offset 8
pushq 440(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 456(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 888(%rsp)
.cfi_adjust_cfa_offset 8
pushq 480(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 496(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 512(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 528(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 544(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 560(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 576(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 592(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 920(%rsp)
.cfi_adjust_cfa_offset 8
callq printf
addq $352, %rsp # imm = 0x160
.cfi_adjust_cfa_offset -352
xorl %eax, %eax
addq $1736, %rsp # imm = 0x6C8
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "> %s\n\ttotalGlobalMem: %u B\n\tsharedMemPerBlock: %u B\n\tregsPerBlock: %d\n\twarpSize: %d threads\n\tmemPitch: %u B\n\tmaxThreadsPerBlock: %d\n\tmaxThreadsDim: (%d, %d, %d)\n\tmaxGridSize: (%d, %d, %d)\n\tclockRate: %d kHz\n\ttotalConstMem: %u B\n\tCompute Capability: %d.%d\n\ttextureAlignment: %u\n\tdeviceOverlap: %d\n\tmultiProcessorCount: %d\n\tkernelExecTimeoutEnabled: %d\n\tintegrated: %d\n\tcanMapHostMemory: %d\n\tcomputeMode: %d\n\tmaxTexture1D: %d\n\tmaxTexture2D: (%d, %d)\n\tmaxTexture3D: (%d, %d, %d)\n\tmaxTexture1DLayered: (%d, %d)\n\tmaxTexture2DLayered: (%d, %d, %d)\n\tsurfaceAlignment: %u\n\tconcurrentKernels: %d\n\tECCEnabled: %d\n\tPCI Bus ID: %d:%d.%d\n\ttccDriver: %d\n\tasyncEngineCount: %d\n\tunifiedAddressing: %d\n\tmemoryClockRate: %d kHz\n\tmemoryBusWidth: %d bits\n\tl2CacheSize: %d B\n\tmaxThreadsPerMultiProcessor: %d\n"
.size .L.str, 788
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001649d6_00000000-6_getDeviceProperties.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.ascii "> %s\n\ttotalGlobalMem: %u B\n\tsharedMemPerBlock: %u B\n\tr"
.ascii "egsPerBlock: %d\n\twarpSize: %d threads\n\tmemPitch: %u B\n\t"
.ascii "maxThreadsPerBlock: %d\n\tmaxThreadsDim: (%d, %d, %d)\n\tmax"
.ascii "GridSize: (%d, %d, %d)\n\tclockRate: %d kHz\n\ttotalConstMem"
.ascii ": %u B\n\tCompute Capability: %d.%d\n\ttextureAlignment: %u\n"
.ascii "\tdeviceOverlap: %d\n\tmultiProcessorCount: %d\n\tkernelExec"
.ascii "TimeoutEnabled: %d\n\tintegrated: %d\n\tcanMapHostMemory: %d"
.ascii "\n\tcomputeMode: %d\n\tmaxTexture1D: %d\n\tmaxTexture2D: (%d"
.ascii ", %d)\n\tmaxTexture3D: (%d, %d, %d)\n\tmaxTexture1DLayered: "
.ascii "(%d, %d)\n\tmaxTexture2DLayered: (%"
.string "d, %d, %d)\n\tsurfaceAlignment: %u\n\tconcurrentKernels: %d\n\tECCEnabled: %d\n\tPCI Bus ID: %d:%d.%d\n\ttccDriver: %d\n\tasyncEngineCount: %d\n\tunifiedAddressing: %d\n\tmemoryClockRate: %d kHz\n\tmemoryBusWidth: %d bits\n\tl2CacheSize: %d B\n\tmaxThreadsPerMultiProcessor: %d\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $1056, %rsp
.cfi_def_cfa_offset 1072
movq %fs:40, %rax
movq %rax, 1048(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rdi
call cudaGetDevice@PLT
leaq 16(%rsp), %rbx
movl 12(%rsp), %esi
movq %rbx, %rdi
call cudaGetDeviceProperties_v2@PLT
movl 640(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1080
movl 640(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1088
movl 644(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1096
movl 648(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1104
movl 652(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1112
movl 656(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1120
movl 660(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1128
movl 664(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1136
movl 668(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1144
movl 672(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1152
movl 676(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1160
movl 680(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1168
pushq 680(%rsp)
.cfi_def_cfa_offset 1176
movl 620(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1184
movl 624(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1192
movl 628(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1200
movl 632(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1208
movl 636(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1216
movl 624(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1224
movl 628(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1232
movl 632(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1240
movl 608(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1248
movl 612(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1256
movl 608(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1264
movl 612(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1272
movl 616(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1280
movl 620(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1288
movl 624(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1296
movl 628(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1304
movl 632(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1312
pushq 624(%rsp)
.cfi_def_cfa_offset 1320
movl 628(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1328
movl 632(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1336
pushq 632(%rsp)
.cfi_def_cfa_offset 1344
movl 636(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1352
movl 640(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1360
movl 644(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1368
movl 648(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1376
movl 652(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1384
movl 656(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1392
movl 660(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1400
movl 664(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1408
pushq 664(%rsp)
.cfi_def_cfa_offset 1416
movl 668(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 1424
movl 672(%rsp), %r9d
movq 664(%rsp), %r8
movq 656(%rsp), %rcx
movq %rbx, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $352, %rsp
.cfi_def_cfa_offset 1072
movq 1048(%rsp), %rax
subq %fs:40, %rax
jne .L6
movl $0, %eax
addq $1056, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "getDeviceProperties.hip"
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $1736, %rsp # imm = 0x6C8
.cfi_def_cfa_offset 1792
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
leaq 12(%rsp), %rdi
callq hipGetDevice
movl 12(%rsp), %esi
leaq 264(%rsp), %rdi
callq hipGetDevicePropertiesR0600
movq 552(%rsp), %rdx
movq 560(%rsp), %rcx
movl 568(%rsp), %r8d
movl 572(%rsp), %r9d
movl 584(%rsp), %eax
movq %rax, 256(%rsp) # 8-byte Spill
movl 588(%rsp), %eax
movq %rax, 248(%rsp) # 8-byte Spill
movl 592(%rsp), %eax
movq %rax, 240(%rsp) # 8-byte Spill
movl 596(%rsp), %eax
movq %rax, 232(%rsp) # 8-byte Spill
movl 600(%rsp), %eax
movq %rax, 224(%rsp) # 8-byte Spill
movl 604(%rsp), %eax
movq %rax, 216(%rsp) # 8-byte Spill
movl 608(%rsp), %eax
movq %rax, 208(%rsp) # 8-byte Spill
movl 612(%rsp), %eax
movq %rax, 200(%rsp) # 8-byte Spill
movl 624(%rsp), %eax
movq %rax, 192(%rsp) # 8-byte Spill
movl 628(%rsp), %eax
movq %rax, 184(%rsp) # 8-byte Spill
movl 648(%rsp), %eax
movq %rax, 176(%rsp) # 8-byte Spill
movl 652(%rsp), %eax
movq %rax, 168(%rsp) # 8-byte Spill
movl 656(%rsp), %eax
movq %rax, 160(%rsp) # 8-byte Spill
movl 660(%rsp), %eax
movq %rax, 152(%rsp) # 8-byte Spill
movl 664(%rsp), %eax
movq %rax, 144(%rsp) # 8-byte Spill
movl 668(%rsp), %eax
movq %rax, 136(%rsp) # 8-byte Spill
movl 672(%rsp), %eax
movq %rax, 128(%rsp) # 8-byte Spill
movl 684(%rsp), %eax
movq %rax, 120(%rsp) # 8-byte Spill
movl 688(%rsp), %eax
movq %rax, 112(%rsp) # 8-byte Spill
movl 720(%rsp), %eax
movq %rax, 104(%rsp) # 8-byte Spill
movl 724(%rsp), %eax
movq %rax, 96(%rsp) # 8-byte Spill
movl 728(%rsp), %eax
movq %rax, 88(%rsp) # 8-byte Spill
movl 748(%rsp), %eax
movq %rax, 80(%rsp) # 8-byte Spill
movl 752(%rsp), %eax
movq %rax, 72(%rsp) # 8-byte Spill
movl 756(%rsp), %eax
movq %rax, 64(%rsp) # 8-byte Spill
movl 760(%rsp), %eax
movq %rax, 56(%rsp) # 8-byte Spill
movl 764(%rsp), %eax
movq %rax, 48(%rsp) # 8-byte Spill
movl 840(%rsp), %eax
movq %rax, 40(%rsp) # 8-byte Spill
movl 844(%rsp), %eax
movq %rax, 32(%rsp) # 8-byte Spill
movl 848(%rsp), %eax
movq %rax, 24(%rsp) # 8-byte Spill
movl 852(%rsp), %eax
movq %rax, 16(%rsp) # 8-byte Spill
movl 856(%rsp), %r14d
movl 860(%rsp), %r15d
movl 864(%rsp), %r12d
movl 868(%rsp), %r13d
movl 872(%rsp), %ebp
movl 876(%rsp), %r10d
movl 880(%rsp), %r11d
movl 888(%rsp), %ebx
subq $8, %rsp
.cfi_adjust_cfa_offset 8
movl $.L.str, %edi
leaq 272(%rsp), %rsi
xorl %eax, %eax
pushq %rbx
.cfi_adjust_cfa_offset 8
pushq %r11
.cfi_adjust_cfa_offset 8
pushq %r10
.cfi_adjust_cfa_offset 8
pushq %rbp
.cfi_adjust_cfa_offset 8
pushq %r13
.cfi_adjust_cfa_offset 8
pushq %r12
.cfi_adjust_cfa_offset 8
pushq %r15
.cfi_adjust_cfa_offset 8
pushq %r14
.cfi_adjust_cfa_offset 8
pushq 88(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 104(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 120(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 136(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 936(%rsp)
.cfi_adjust_cfa_offset 8
pushq 160(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 176(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 192(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 208(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 224(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 240(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 256(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 272(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 288(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 304(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 320(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 336(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 352(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 368(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 384(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 400(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 416(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 880(%rsp)
.cfi_adjust_cfa_offset 8
pushq 440(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 456(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 888(%rsp)
.cfi_adjust_cfa_offset 8
pushq 480(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 496(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 512(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 528(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 544(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 560(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 576(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 592(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq 920(%rsp)
.cfi_adjust_cfa_offset 8
callq printf
addq $352, %rsp # imm = 0x160
.cfi_adjust_cfa_offset -352
xorl %eax, %eax
addq $1736, %rsp # imm = 0x6C8
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "> %s\n\ttotalGlobalMem: %u B\n\tsharedMemPerBlock: %u B\n\tregsPerBlock: %d\n\twarpSize: %d threads\n\tmemPitch: %u B\n\tmaxThreadsPerBlock: %d\n\tmaxThreadsDim: (%d, %d, %d)\n\tmaxGridSize: (%d, %d, %d)\n\tclockRate: %d kHz\n\ttotalConstMem: %u B\n\tCompute Capability: %d.%d\n\ttextureAlignment: %u\n\tdeviceOverlap: %d\n\tmultiProcessorCount: %d\n\tkernelExecTimeoutEnabled: %d\n\tintegrated: %d\n\tcanMapHostMemory: %d\n\tcomputeMode: %d\n\tmaxTexture1D: %d\n\tmaxTexture2D: (%d, %d)\n\tmaxTexture3D: (%d, %d, %d)\n\tmaxTexture1DLayered: (%d, %d)\n\tmaxTexture2DLayered: (%d, %d, %d)\n\tsurfaceAlignment: %u\n\tconcurrentKernels: %d\n\tECCEnabled: %d\n\tPCI Bus ID: %d:%d.%d\n\ttccDriver: %d\n\tasyncEngineCount: %d\n\tunifiedAddressing: %d\n\tmemoryClockRate: %d kHz\n\tmemoryBusWidth: %d bits\n\tl2CacheSize: %d B\n\tmaxThreadsPerMultiProcessor: %d\n"
.size .L.str, 788
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /*
Program to find the sum of all the array elements given that the length of the array is a power of 2.
Benchmarking is been done to compare the performance of the CPU (squential with extremely powerful cores) and
the GPU (parallel with modestly powerful cores).
*/
/*
Sequential codes takes n-1 steps whereas parallel code takes log(n) base 2 steps.
Note: Algorithm can handle only sizes less than or equal to 2^11.
*/
// Importing the required headers
#include<stdio.h>
#include<time.h>
#include<cuda.h>
// Returns the duration from start to end times in sec
double time_elapsed(struct timespec *start, struct timespec *end)
{
double t;
t = (end->tv_sec - start->tv_sec); // diff in seconds
t += (end->tv_nsec - start->tv_nsec) * 0.000000001; //diff in nanoseconds
return t;
}
// GPU Kernel
__global__ void GPU_Sum1(int *a, int len) //GPU code without shared memory.
{
int id = threadIdx.x;
int n = blockDim.x;
while(n > 0)
{
if(id < n) a[id] += a[id + n];
__syncthreads();
n = n/2;
}
return;
}
// GPU Kernel
__global__ void GPU_Sum2(int *array, int len) //GPU code with shared memory.
{
extern __shared__ int a[];
int id = threadIdx.x;
int n = blockDim.x;
a[id] = array[id];
a[id+n] = array[id+n];
__syncthreads();
while(n > 0)
{
if(id < n) a[id] += a[id + n];
__syncthreads();
n = n/2;
}
if(id == 0) array[0] = a[0];
return;
}
// CPU Function
void CPU_Sum(int *a, int n)
{
for(int i = n-1; i > 0; i--)
a[i-1] += a[i];
return;
}
// Code execution begins here
int main()
{
    struct timespec start1, end1; //variables to store time for GPU
    struct timespec start2, end2; //variables to store time for GPU
    struct timespec start3, end3; //variables to store time for CPU
    int n; //Length of the array
    printf("Enter the value of n: "); //Get length
    scanf("%d", &n);
    int *a1, *a2, *a3;
    // Three managed (host- and device-visible) copies of the same data, one
    // per summing strategy, because every strategy overwrites its input.
    if(cudaMallocManaged(&a1, n*sizeof(int)) != cudaSuccess) //Allocate memory
    {
        printf("Malloc Error!\n");
        return 0;
    }
    if(cudaMallocManaged(&a2, n*sizeof(int)) != cudaSuccess) //Allocate memory
    {
        printf("Malloc Error!\n");
        cudaFree(a1);
        return 0;
    }
    if(cudaMallocManaged(&a3, n*sizeof(int)) != cudaSuccess) //Allocate memory
    {
        printf("Malloc Error!\n");
        cudaFree(a1);
        cudaFree(a2);
        return 0;
    }
    for(int i = 0; i < n; i++) //Assign random values
    {
        a1[i] = rand()%10; // digits 0..9; rand() is unseeded, so runs repeat
        a2[i] = a1[i];
        a3[i] = a2[i];
    }
    clock_gettime(CLOCK_REALTIME, &start1); //start timestamp
    // One block of n/2 threads: each pass folds the upper half onto the lower
    // half, so n must be a power of two (see file header note).
    GPU_Sum1<<<1, n/2>>>(a1, n);
    cudaDeviceSynchronize();
    clock_gettime(CLOCK_REALTIME, &end1); //end timestamp
    clock_gettime(CLOCK_REALTIME, &start2); //start timestamp
    // Same launch shape plus n*sizeof(int) bytes of dynamic shared memory
    // for GPU_Sum2's staging buffer.
    GPU_Sum2<<<1, n/2, n*sizeof(int)>>>(a2, n);
    cudaDeviceSynchronize();
    clock_gettime(CLOCK_REALTIME, &end2); //end timestamp
    clock_gettime(CLOCK_REALTIME, &start3); //start timestamp
    CPU_Sum(a3, n);
    clock_gettime(CLOCK_REALTIME, &end3); //end timestamp
    // NOTE(review): kernel launches and cudaDeviceSynchronize() are not
    // error-checked; a failed launch would silently print stale data below.
    printf("\nResult of the GPU (without shared memory) : %d\n", a1[0]);
    printf("Result of the GPU (with shared memory) : %d\n", a2[0]);
    printf("Result of the CPU : %d\n", a3[0]);
    printf("\nTime taken by GPU (no shared memory) is : %lf\n", time_elapsed(&start1, &end1)); //print result for GPU
    printf("Time taken by GPU (with shared memory) is : %lf\n", time_elapsed(&start2, &end2)); //print result for GPU
    printf("Time taken by CPU is : %lf\n", time_elapsed(&start3, &end3)); //print result for CPU
    cudaFree(a1);
    cudaFree(a2);
    cudaFree(a3);
    cudaDeviceReset();
    return 0;
} | code for sm_80
Function : _Z8GPU_Sum2Pii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R2, R9, R7, c[0x0][0x160] ; /* 0x0000580009027625 */
/* 0x001fcc00078e0207 */
/*0050*/ IMAD.WIDE R4, R7, c[0x0][0x0], R2 ; /* 0x0000000007047a25 */
/* 0x000fe400078e0202 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ee2000c1e1900 */
/*0080*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff067624 */
/* 0x000fe200078e00ff */
/*0090*/ SHF.L.U32 R0, R9.reuse, 0x2, RZ ; /* 0x0000000209007819 */
/* 0x040fe400000006ff */
/*00a0*/ ISETP.NE.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */
/* 0x000fc40003f05270 */
/*00b0*/ ISETP.GE.AND P1, PT, R6, 0x1, PT ; /* 0x000000010600780c */
/* 0x000fe20003f26270 */
/*00c0*/ IMAD R7, R7, c[0x0][0x0], R0 ; /* 0x0000000007077a24 */
/* 0x000fe200078e0200 */
/*00d0*/ STS [R9.X4], R2 ; /* 0x0000000209007388 */
/* 0x0041e80000004800 */
/*00e0*/ STS [R7], R4 ; /* 0x0000000407007388 */
/* 0x0081e80000000800 */
/*00f0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0100*/ @!P1 BRA 0x1c0 ; /* 0x000000b000009947 */
/* 0x000fea0003800000 */
/*0110*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff037624 */
/* 0x001fca00078e00ff */
/*0120*/ ISETP.GE.AND P1, PT, R9, R3, PT ; /* 0x000000030900720c */
/* 0x000fda0003f26270 */
/*0130*/ @!P1 LEA R2, R3, R0, 0x2 ; /* 0x0000000003029211 */
/* 0x000fe200078e10ff */
/*0140*/ @!P1 LDS R4, [R9.X4] ; /* 0x0000000009049984 */
/* 0x000fe20000004800 */
/*0150*/ SHF.R.U32.HI R3, RZ, 0x1, R3 ; /* 0x00000001ff037819 */
/* 0x000fc60000011603 */
/*0160*/ @!P1 LDS R5, [R2] ; /* 0x0000000002059984 */
/* 0x000e240000000800 */
/*0170*/ @!P1 IMAD.IADD R4, R4, 0x1, R5 ; /* 0x0000000104049824 */
/* 0x001fca00078e0205 */
/*0180*/ @!P1 STS [R9.X4], R4 ; /* 0x0000000409009388 */
/* 0x0001e80000004800 */
/*0190*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*01a0*/ ISETP.NE.AND P1, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fda0003f25270 */
/*01b0*/ @P1 BRA 0x120 ; /* 0xffffff6000001947 */
/* 0x001fea000383ffff */
/*01c0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x001fea0003800000 */
/*01d0*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */
/* 0x000e220000000800 */
/*01e0*/ MOV R2, c[0x0][0x160] ; /* 0x0000580000027a02 */
/* 0x000fe20000000f00 */
/*01f0*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff037624 */
/* 0x000fca00078e00ff */
/*0200*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101904 */
/*0210*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0220*/ BRA 0x220; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z8GPU_Sum1Pii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ MOV R0, c[0x0][0x0] ; /* 0x0000000000007a02 */
/* 0x000fc80000000f00 */
/*0020*/ ISETP.GE.AND P0, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fda0003f06270 */
/*0030*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0040*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0050*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0060*/ MOV R7, c[0x0][0x0] ; /* 0x0000000000077a02 */
/* 0x000fe20000000f00 */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0080*/ IMAD.WIDE R2, R6, R3, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0203 */
/*0090*/ ISETP.GE.AND P0, PT, R6, R7, PT ; /* 0x000000070600720c */
/* 0x000fe20003f06270 */
/*00a0*/ BSSY B0, 0x120 ; /* 0x0000007000007945 */
/* 0x000fd80003800000 */
/*00b0*/ @P0 BRA 0x110 ; /* 0x0000005000000947 */
/* 0x001fea0003800000 */
/*00c0*/ IMAD.WIDE R4, R7, 0x4, R2 ; /* 0x0000000407047825 */
/* 0x000fe200078e0202 */
/*00d0*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000eaa000c1e1900 */
/*00e0*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea4000c1e1900 */
/*00f0*/ IMAD.IADD R9, R0, 0x1, R5 ; /* 0x0000000100097824 */
/* 0x004fca00078e0205 */
/*0100*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */
/* 0x0001e4000c101904 */
/*0110*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0120*/ SHF.R.U32.HI R7, RZ, 0x1, R7 ; /* 0x00000001ff077819 */
/* 0x000fe20000011607 */
/*0130*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe60000010000 */
/*0140*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fda0003f05270 */
/*0150*/ @P0 BRA 0x90 ; /* 0xffffff3000000947 */
/* 0x000fea000383ffff */
/*0160*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0170*/ BRA 0x170; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /*
Program to find the sum of all the array elements given that the length of the array is a power of 2.
Benchmarking is been done to compare the performance of the CPU (squential with extremely powerful cores) and
the GPU (parallel with modestly powerful cores).
*/
/*
Sequential codes takes n-1 steps whereas parallel code takes log(n) base 2 steps.
Note: Algorithm can handle only sizes less than or equal to 2^11.
*/
// Importing the required headers
#include<stdio.h>
#include<time.h>
#include<cuda.h>
// Returns the duration from start to end times in sec
// Computes (end - start) as a double number of seconds: the whole-second
// difference plus the nanosecond remainder scaled by 1e-9. A negative
// interval (end before start) is returned as-is, not treated as an error.
double time_elapsed(struct timespec *start, struct timespec *end)
{
    double t;
    t = (end->tv_sec - start->tv_sec); // diff in seconds
    t += (end->tv_nsec - start->tv_nsec) * 0.000000001; //diff in nanoseconds
    return t;
}
// GPU Kernel
// In-place tree reduction over a[] in global memory; the total ends up in a[0].
// main launches this as <<<1, n/2>>> on an n-element array (n a power of two,
// per the file header), so blockDim.x == n/2 and the first pass folds the
// upper half of the array onto the lower half; the stride then halves each
// iteration until it reaches 0. `len` is unused inside the kernel.
__global__ void GPU_Sum1(int *a, int len) //GPU code without shared memory.
{
    int id = threadIdx.x;  // this thread's element index
    int n = blockDim.x;    // current fold stride, starts at half the data size
    while(n > 0)
    {
        if(id < n) a[id] += a[id + n];  // fold element id+n onto element id
        __syncthreads();  // barrier sits outside the divergent if, so the whole block reaches it
        n = n/2;
    }
    return;
}
// GPU Kernel
// Shared-memory variant of the reduction: stages the 2*blockDim.x input
// elements from global `array` into the dynamic shared buffer a[] (sized by
// the launch's third <<<>>> argument — n*sizeof(int) bytes in main), reduces
// there, then thread 0 publishes the sum to array[0]. `len` is unused.
__global__ void GPU_Sum2(int *array, int len) //GPU code with shared memory.
{
    extern __shared__ int a[];  // dynamic shared memory, sized at launch
    int id = threadIdx.x;
    int n = blockDim.x;
    a[id] = array[id];      // each thread stages one lower-half element...
    a[id+n] = array[id+n];  // ...and one upper-half element
    __syncthreads();  // all staging complete before any thread reads shared memory
    while(n > 0)
    {
        if(id < n) a[id] += a[id + n];
        __syncthreads();  // outside the divergent if: reached by every thread
        n = n/2;
    }
    if(id == 0) array[0] = a[0];  // single writer publishes the result
    return;
}
// CPU Function
// Sequential reference: sweeps from the tail, adding a[i] into a[i-1], so the
// total of all n elements accumulates into a[0]. The array is modified in place.
void CPU_Sum(int *a, int n)
{
    for(int i = n-1; i > 0; i--)
        a[i-1] += a[i];
    return;
}
// Code execution begins here
int main()
{
    struct timespec start1, end1; //variables to store time for GPU
    struct timespec start2, end2; //variables to store time for GPU
    struct timespec start3, end3; //variables to store time for CPU
    int n; //Length of the array
    printf("Enter the value of n: "); //Get length
    scanf("%d", &n);
    int *a1, *a2, *a3;
    // Three managed (host- and device-visible) copies of the same data, one
    // per summing strategy, because every strategy overwrites its input.
    if(cudaMallocManaged(&a1, n*sizeof(int)) != cudaSuccess) //Allocate memory
    {
        printf("Malloc Error!\n");
        return 0;
    }
    if(cudaMallocManaged(&a2, n*sizeof(int)) != cudaSuccess) //Allocate memory
    {
        printf("Malloc Error!\n");
        cudaFree(a1);
        return 0;
    }
    if(cudaMallocManaged(&a3, n*sizeof(int)) != cudaSuccess) //Allocate memory
    {
        printf("Malloc Error!\n");
        cudaFree(a1);
        cudaFree(a2);
        return 0;
    }
    for(int i = 0; i < n; i++) //Assign random values
    {
        a1[i] = rand()%10; // digits 0..9; rand() is unseeded, so runs repeat
        a2[i] = a1[i];
        a3[i] = a2[i];
    }
    clock_gettime(CLOCK_REALTIME, &start1); //start timestamp
    // One block of n/2 threads: each pass folds the upper half onto the lower
    // half, so n must be a power of two (see file header note).
    GPU_Sum1<<<1, n/2>>>(a1, n);
    cudaDeviceSynchronize();
    clock_gettime(CLOCK_REALTIME, &end1); //end timestamp
    clock_gettime(CLOCK_REALTIME, &start2); //start timestamp
    // Same launch shape plus n*sizeof(int) bytes of dynamic shared memory
    // for GPU_Sum2's staging buffer.
    GPU_Sum2<<<1, n/2, n*sizeof(int)>>>(a2, n);
    cudaDeviceSynchronize();
    clock_gettime(CLOCK_REALTIME, &end2); //end timestamp
    clock_gettime(CLOCK_REALTIME, &start3); //start timestamp
    CPU_Sum(a3, n);
    clock_gettime(CLOCK_REALTIME, &end3); //end timestamp
    // NOTE(review): kernel launches and cudaDeviceSynchronize() are not
    // error-checked; a failed launch would silently print stale data below.
    printf("\nResult of the GPU (without shared memory) : %d\n", a1[0]);
    printf("Result of the GPU (with shared memory) : %d\n", a2[0]);
    printf("Result of the CPU : %d\n", a3[0]);
    printf("\nTime taken by GPU (no shared memory) is : %lf\n", time_elapsed(&start1, &end1)); //print result for GPU
    printf("Time taken by GPU (with shared memory) is : %lf\n", time_elapsed(&start2, &end2)); //print result for GPU
    printf("Time taken by CPU is : %lf\n", time_elapsed(&start3, &end3)); //print result for CPU
    cudaFree(a1);
    cudaFree(a2);
    cudaFree(a3);
    cudaDeviceReset();
    return 0;
} | .file "tmpxft_000685d4_00000000-6_ArraySum.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2062:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2062:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z12time_elapsedP8timespecS0_
.type _Z12time_elapsedP8timespecS0_, @function
_Z12time_elapsedP8timespecS0_:
.LFB2057:
.cfi_startproc
endbr64
movq (%rsi), %rax
subq (%rdi), %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
movq 8(%rsi), %rax
subq 8(%rdi), %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
mulsd .LC0(%rip), %xmm0
addsd %xmm1, %xmm0
ret
.cfi_endproc
.LFE2057:
.size _Z12time_elapsedP8timespecS0_, .-_Z12time_elapsedP8timespecS0_
.globl _Z7CPU_SumPii
.type _Z7CPU_SumPii, @function
_Z7CPU_SumPii:
.LFB2058:
.cfi_startproc
endbr64
leal -1(%rsi), %eax
testl %eax, %eax
jle .L4
cltq
leaq -4(%rdi,%rax,4), %rax
movslq %esi, %rdx
leaq -12(%rdi,%rdx,4), %rcx
leal -2(%rsi), %edx
salq $2, %rdx
subq %rdx, %rcx
.L6:
movl 4(%rax), %edx
addl %edx, (%rax)
subq $4, %rax
cmpq %rcx, %rax
jne .L6
.L4:
ret
.cfi_endproc
.LFE2058:
.size _Z7CPU_SumPii, .-_Z7CPU_SumPii
.globl _Z28__device_stub__Z8GPU_Sum1PiiPii
.type _Z28__device_stub__Z8GPU_Sum1PiiPii, @function
_Z28__device_stub__Z8GPU_Sum1PiiPii:
.LFB2084:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L12
.L8:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L13
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L12:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z8GPU_Sum1Pii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L8
.L13:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2084:
.size _Z28__device_stub__Z8GPU_Sum1PiiPii, .-_Z28__device_stub__Z8GPU_Sum1PiiPii
.globl _Z8GPU_Sum1Pii
.type _Z8GPU_Sum1Pii, @function
_Z8GPU_Sum1Pii:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z28__device_stub__Z8GPU_Sum1PiiPii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _Z8GPU_Sum1Pii, .-_Z8GPU_Sum1Pii
.globl _Z28__device_stub__Z8GPU_Sum2PiiPii
.type _Z28__device_stub__Z8GPU_Sum2PiiPii, @function
_Z28__device_stub__Z8GPU_Sum2PiiPii:
.LFB2086:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L20
.L16:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L21
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z8GPU_Sum2Pii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L16
.L21:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2086:
.size _Z28__device_stub__Z8GPU_Sum2PiiPii, .-_Z28__device_stub__Z8GPU_Sum2PiiPii
.globl _Z8GPU_Sum2Pii
.type _Z8GPU_Sum2Pii, @function
_Z8GPU_Sum2Pii:
.LFB2087:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z28__device_stub__Z8GPU_Sum2PiiPii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _Z8GPU_Sum2Pii, .-_Z8GPU_Sum2Pii
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "Enter the value of n: "
.LC2:
.string "%d"
.LC3:
.string "Malloc Error!\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC4:
.string "\nResult of the GPU (without shared memory) : %d\n"
.align 8
.LC5:
.string "Result of the GPU (with shared memory) : %d\n"
.align 8
.LC6:
.string "Result of the CPU : %d\n"
.align 8
.LC7:
.string "\nTime taken by GPU (no shared memory) is : %lf\n"
.align 8
.LC8:
.string "Time taken by GPU (with shared memory) is : %lf\n"
.align 8
.LC9:
.string "Time taken by CPU is : %lf\n"
.text
.globl main
.type main, @function
main:
.LFB2059:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $144, %rsp
.cfi_def_cfa_offset 176
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq .LC1(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
leaq 4(%rsp), %rsi
leaq .LC2(%rip), %rdi
movl $0, %eax
call __isoc23_scanf@PLT
movslq 4(%rsp), %rsi
salq $2, %rsi
leaq 8(%rsp), %rdi
movl $1, %edx
call cudaMallocManaged@PLT
testl %eax, %eax
jne .L38
movslq 4(%rsp), %rsi
salq $2, %rsi
leaq 16(%rsp), %rdi
movl $1, %edx
call cudaMallocManaged@PLT
testl %eax, %eax
jne .L39
movslq 4(%rsp), %rsi
salq $2, %rsi
leaq 24(%rsp), %rdi
movl $1, %edx
call cudaMallocManaged@PLT
testl %eax, %eax
jne .L28
movl $0, %ebx
cmpl $0, 4(%rsp)
jle .L30
.L29:
call rand@PLT
movslq %eax, %rdx
imulq $1717986919, %rdx, %rdx
sarq $34, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,4), %edx
addl %edx, %edx
subl %edx, %eax
movq 8(%rsp), %rdx
movl %eax, (%rdx,%rbx,4)
movq 8(%rsp), %rax
movl (%rax,%rbx,4), %edx
movq 16(%rsp), %rax
movl %edx, (%rax,%rbx,4)
movq 16(%rsp), %rax
movl (%rax,%rbx,4), %edx
movq 24(%rsp), %rax
movl %edx, (%rax,%rbx,4)
addq $1, %rbx
cmpl %ebx, 4(%rsp)
jg .L29
.L30:
leaq 32(%rsp), %rsi
movl $0, %edi
call clock_gettime@PLT
movl 4(%rsp), %eax
movl $2, %ecx
cltd
idivl %ecx
movl %eax, 112(%rsp)
movl $1, 116(%rsp)
movl $1, 120(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $1, 104(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 112(%rsp), %rdx
movl $1, %ecx
movq 96(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L40
.L31:
call cudaDeviceSynchronize@PLT
leaq 48(%rsp), %rsi
movl $0, %edi
call clock_gettime@PLT
leaq 64(%rsp), %rsi
movl $0, %edi
call clock_gettime@PLT
movl 4(%rsp), %ecx
movl $2, %esi
movl %ecx, %eax
cltd
idivl %esi
movl %eax, 112(%rsp)
movl $1, 116(%rsp)
movl $1, 120(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $1, 104(%rsp)
movslq %ecx, %rcx
movl $0, %r9d
leaq 0(,%rcx,4), %r8
movq 112(%rsp), %rdx
movl $1, %ecx
movq 96(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L41
.L32:
call cudaDeviceSynchronize@PLT
leaq 80(%rsp), %r12
movq %r12, %rsi
movl $0, %edi
call clock_gettime@PLT
leaq 96(%rsp), %rbx
movq %rbx, %rsi
movl $0, %edi
call clock_gettime@PLT
movl 4(%rsp), %esi
movq 24(%rsp), %rdi
call _Z7CPU_SumPii
leaq 112(%rsp), %rbp
movq %rbp, %rsi
movl $0, %edi
call clock_gettime@PLT
movq 8(%rsp), %rax
movl (%rax), %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 16(%rsp), %rax
movl (%rax), %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 24(%rsp), %rax
movl (%rax), %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 48(%rsp), %rsi
leaq 32(%rsp), %rdi
call _Z12time_elapsedP8timespecS0_
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
leaq 64(%rsp), %rdi
movq %r12, %rsi
call _Z12time_elapsedP8timespecS0_
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq %rbp, %rsi
movq %rbx, %rdi
call _Z12time_elapsedP8timespecS0_
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
call cudaDeviceReset@PLT
.L26:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L42
movl $0, %eax
addq $144, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L38:
.cfi_restore_state
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L26
.L39:
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
jmp .L26
.L28:
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
jmp .L26
.L40:
movl 4(%rsp), %esi
movq 8(%rsp), %rdi
call _Z28__device_stub__Z8GPU_Sum1PiiPii
jmp .L31
.L41:
movl 4(%rsp), %esi
movq 16(%rsp), %rdi
call _Z28__device_stub__Z8GPU_Sum2PiiPii
jmp .L32
.L42:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2059:
.size main, .-main
.section .rodata.str1.1
.LC10:
.string "_Z8GPU_Sum2Pii"
.LC11:
.string "_Z8GPU_Sum1Pii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2089:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _Z8GPU_Sum2Pii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _Z8GPU_Sum1Pii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2089:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long -400107883
.long 1041313291
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /*
Program to find the sum of all the array elements given that the length of the array is a power of 2.
Benchmarking is been done to compare the performance of the CPU (squential with extremely powerful cores) and
the GPU (parallel with modestly powerful cores).
*/
/*
Sequential codes takes n-1 steps whereas parallel code takes log(n) base 2 steps.
Note: Algorithm can handle only sizes less than or equal to 2^11.
*/
// Importing the required headers
#include<stdio.h>
#include<time.h>
#include<cuda.h>
// Returns the duration from start to end times in sec
double time_elapsed(struct timespec *start, struct timespec *end)
{
double t;
t = (end->tv_sec - start->tv_sec); // diff in seconds
t += (end->tv_nsec - start->tv_nsec) * 0.000000001; //diff in nanoseconds
return t;
}
// GPU Kernel
__global__ void GPU_Sum1(int *a, int len) //GPU code without shared memory.
{
int id = threadIdx.x;
int n = blockDim.x;
while(n > 0)
{
if(id < n) a[id] += a[id + n];
__syncthreads();
n = n/2;
}
return;
}
// GPU Kernel
__global__ void GPU_Sum2(int *array, int len) //GPU code with shared memory.
{
extern __shared__ int a[];
int id = threadIdx.x;
int n = blockDim.x;
a[id] = array[id];
a[id+n] = array[id+n];
__syncthreads();
while(n > 0)
{
if(id < n) a[id] += a[id + n];
__syncthreads();
n = n/2;
}
if(id == 0) array[0] = a[0];
return;
}
// CPU Function
void CPU_Sum(int *a, int n)
{
for(int i = n-1; i > 0; i--)
a[i-1] += a[i];
return;
}
// Code execution begins here
int main()
{
struct timespec start1, end1; //variables to store time for GPU
struct timespec start2, end2; //variables to store time for GPU
struct timespec start3, end3; //variables to store time for CPU
int n; //Length of the array
printf("Enter the value of n: "); //Get length
scanf("%d", &n);
int *a1, *a2, *a3;
if(cudaMallocManaged(&a1, n*sizeof(int)) != cudaSuccess) //Allocate memory
{
printf("Malloc Error!\n");
return 0;
}
if(cudaMallocManaged(&a2, n*sizeof(int)) != cudaSuccess) //Allocate memory
{
printf("Malloc Error!\n");
cudaFree(a1);
return 0;
}
if(cudaMallocManaged(&a3, n*sizeof(int)) != cudaSuccess) //Allocate memory
{
printf("Malloc Error!\n");
cudaFree(a1);
cudaFree(a2);
return 0;
}
for(int i = 0; i < n; i++) //Assign random values
{
a1[i] = rand()%10;
a2[i] = a1[i];
a3[i] = a2[i];
}
clock_gettime(CLOCK_REALTIME, &start1); //start timestamp
GPU_Sum1<<<1, n/2>>>(a1, n);
cudaDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &end1); //end timestamp
clock_gettime(CLOCK_REALTIME, &start2); //start timestamp
GPU_Sum2<<<1, n/2, n*sizeof(int)>>>(a2, n);
cudaDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &end2); //end timestamp
clock_gettime(CLOCK_REALTIME, &start3); //start timestamp
CPU_Sum(a3, n);
clock_gettime(CLOCK_REALTIME, &end3); //end timestamp
printf("\nResult of the GPU (without shared memory) : %d\n", a1[0]);
printf("Result of the GPU (with shared memory) : %d\n", a2[0]);
printf("Result of the CPU : %d\n", a3[0]);
printf("\nTime taken by GPU (no shared memory) is : %lf\n", time_elapsed(&start1, &end1)); //print result for GPU
printf("Time taken by GPU (with shared memory) is : %lf\n", time_elapsed(&start2, &end2)); //print result for GPU
printf("Time taken by CPU is : %lf\n", time_elapsed(&start3, &end3)); //print result for CPU
cudaFree(a1);
cudaFree(a2);
cudaFree(a3);
cudaDeviceReset();
return 0;
} | /*
Program to find the sum of all the array elements given that the length of the array is a power of 2.
Benchmarking is been done to compare the performance of the CPU (squential with extremely powerful cores) and
the GPU (parallel with modestly powerful cores).
*/
/*
Sequential codes takes n-1 steps whereas parallel code takes log(n) base 2 steps.
Note: Algorithm can handle only sizes less than or equal to 2^11.
*/
// Importing the required headers
#include<stdio.h>
#include<time.h>
#include<hip/hip_runtime.h>
// Returns the duration from start to end times in sec
double time_elapsed(struct timespec *start, struct timespec *end)
{
double t;
t = (end->tv_sec - start->tv_sec); // diff in seconds
t += (end->tv_nsec - start->tv_nsec) * 0.000000001; //diff in nanoseconds
return t;
}
// GPU Kernel
__global__ void GPU_Sum1(int *a, int len) //GPU code without shared memory.
{
int id = threadIdx.x;
int n = blockDim.x;
while(n > 0)
{
if(id < n) a[id] += a[id + n];
__syncthreads();
n = n/2;
}
return;
}
// GPU Kernel
__global__ void GPU_Sum2(int *array, int len) //GPU code with shared memory.
{
extern __shared__ int a[];
int id = threadIdx.x;
int n = blockDim.x;
a[id] = array[id];
a[id+n] = array[id+n];
__syncthreads();
while(n > 0)
{
if(id < n) a[id] += a[id + n];
__syncthreads();
n = n/2;
}
if(id == 0) array[0] = a[0];
return;
}
// CPU Function
void CPU_Sum(int *a, int n)
{
for(int i = n-1; i > 0; i--)
a[i-1] += a[i];
return;
}
// Code execution begins here
int main()
{
struct timespec start1, end1; //variables to store time for GPU
struct timespec start2, end2; //variables to store time for GPU
struct timespec start3, end3; //variables to store time for CPU
int n; //Length of the array
printf("Enter the value of n: "); //Get length
scanf("%d", &n);
int *a1, *a2, *a3;
if(hipMallocManaged(&a1, n*sizeof(int)) != hipSuccess) //Allocate memory
{
printf("Malloc Error!\n");
return 0;
}
if(hipMallocManaged(&a2, n*sizeof(int)) != hipSuccess) //Allocate memory
{
printf("Malloc Error!\n");
hipFree(a1);
return 0;
}
if(hipMallocManaged(&a3, n*sizeof(int)) != hipSuccess) //Allocate memory
{
printf("Malloc Error!\n");
hipFree(a1);
hipFree(a2);
return 0;
}
for(int i = 0; i < n; i++) //Assign random values
{
a1[i] = rand()%10;
a2[i] = a1[i];
a3[i] = a2[i];
}
clock_gettime(CLOCK_REALTIME, &start1); //start timestamp
GPU_Sum1<<<1, n/2>>>(a1, n);
hipDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &end1); //end timestamp
clock_gettime(CLOCK_REALTIME, &start2); //start timestamp
GPU_Sum2<<<1, n/2, n*sizeof(int)>>>(a2, n);
hipDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &end2); //end timestamp
clock_gettime(CLOCK_REALTIME, &start3); //start timestamp
CPU_Sum(a3, n);
clock_gettime(CLOCK_REALTIME, &end3); //end timestamp
printf("\nResult of the GPU (without shared memory) : %d\n", a1[0]);
printf("Result of the GPU (with shared memory) : %d\n", a2[0]);
printf("Result of the CPU : %d\n", a3[0]);
printf("\nTime taken by GPU (no shared memory) is : %lf\n", time_elapsed(&start1, &end1)); //print result for GPU
printf("Time taken by GPU (with shared memory) is : %lf\n", time_elapsed(&start2, &end2)); //print result for GPU
printf("Time taken by CPU is : %lf\n", time_elapsed(&start3, &end3)); //print result for CPU
hipFree(a1);
hipFree(a2);
hipFree(a3);
hipDeviceReset();
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /*
Program to find the sum of all the array elements given that the length of the array is a power of 2.
Benchmarking is been done to compare the performance of the CPU (squential with extremely powerful cores) and
the GPU (parallel with modestly powerful cores).
*/
/*
Sequential codes takes n-1 steps whereas parallel code takes log(n) base 2 steps.
Note: Algorithm can handle only sizes less than or equal to 2^11.
*/
// Importing the required headers
#include<stdio.h>
#include<time.h>
#include<hip/hip_runtime.h>
// Returns the duration from start to end times in sec
// Computes (end - start) as a double number of seconds: the whole-second
// difference plus the nanosecond remainder scaled by 1e-9. A negative
// interval (end before start) is returned as-is, not treated as an error.
double time_elapsed(struct timespec *start, struct timespec *end)
{
    double t;
    t = (end->tv_sec - start->tv_sec); // diff in seconds
    t += (end->tv_nsec - start->tv_nsec) * 0.000000001; //diff in nanoseconds
    return t;
}
// GPU Kernel
// In-place tree reduction over a[] in global memory; the total ends up in a[0].
// main launches this as <<<1, n/2>>> on an n-element array (n a power of two,
// per the file header), so blockDim.x == n/2 and the first pass folds the
// upper half of the array onto the lower half; the stride then halves each
// iteration until it reaches 0. `len` is unused inside the kernel.
__global__ void GPU_Sum1(int *a, int len) //GPU code without shared memory.
{
    int id = threadIdx.x;  // this thread's element index
    int n = blockDim.x;    // current fold stride, starts at half the data size
    while(n > 0)
    {
        if(id < n) a[id] += a[id + n];  // fold element id+n onto element id
        __syncthreads();  // barrier sits outside the divergent if, so the whole block reaches it
        n = n/2;
    }
    return;
}
// GPU Kernel
// Shared-memory variant of the reduction: stages the 2*blockDim.x input
// elements from global `array` into the dynamic shared buffer a[] (sized by
// the launch's third <<<>>> argument — n*sizeof(int) bytes in main), reduces
// there, then thread 0 publishes the sum to array[0]. `len` is unused.
__global__ void GPU_Sum2(int *array, int len) //GPU code with shared memory.
{
    extern __shared__ int a[];  // dynamic shared memory, sized at launch
    int id = threadIdx.x;
    int n = blockDim.x;
    a[id] = array[id];      // each thread stages one lower-half element...
    a[id+n] = array[id+n];  // ...and one upper-half element
    __syncthreads();  // all staging complete before any thread reads shared memory
    while(n > 0)
    {
        if(id < n) a[id] += a[id + n];
        __syncthreads();  // outside the divergent if: reached by every thread
        n = n/2;
    }
    if(id == 0) array[0] = a[0];  // single writer publishes the result
    return;
}
// CPU Function
// Sequential reference: sweeps from the tail, adding a[i] into a[i-1], so the
// total of all n elements accumulates into a[0]. The array is modified in place.
void CPU_Sum(int *a, int n)
{
    for(int i = n-1; i > 0; i--)
        a[i-1] += a[i];
    return;
}
// Code execution begins here
int main()
{
    struct timespec start1, end1; //variables to store time for GPU
    struct timespec start2, end2; //variables to store time for GPU
    struct timespec start3, end3; //variables to store time for CPU
    int n; //Length of the array
    printf("Enter the value of n: "); //Get length
    scanf("%d", &n);
    int *a1, *a2, *a3;
    // Three managed (host- and device-visible) copies of the same data, one
    // per summing strategy, because every strategy overwrites its input.
    if(hipMallocManaged(&a1, n*sizeof(int)) != hipSuccess) //Allocate memory
    {
        printf("Malloc Error!\n");
        return 0;
    }
    if(hipMallocManaged(&a2, n*sizeof(int)) != hipSuccess) //Allocate memory
    {
        printf("Malloc Error!\n");
        hipFree(a1);
        return 0;
    }
    if(hipMallocManaged(&a3, n*sizeof(int)) != hipSuccess) //Allocate memory
    {
        printf("Malloc Error!\n");
        hipFree(a1);
        hipFree(a2);
        return 0;
    }
    for(int i = 0; i < n; i++) //Assign random values
    {
        a1[i] = rand()%10; // digits 0..9; rand() is unseeded, so runs repeat
        a2[i] = a1[i];
        a3[i] = a2[i];
    }
    clock_gettime(CLOCK_REALTIME, &start1); //start timestamp
    // One block of n/2 threads: each pass folds the upper half onto the lower
    // half, so n must be a power of two (see file header note).
    GPU_Sum1<<<1, n/2>>>(a1, n);
    hipDeviceSynchronize();
    clock_gettime(CLOCK_REALTIME, &end1); //end timestamp
    clock_gettime(CLOCK_REALTIME, &start2); //start timestamp
    // Same launch shape plus n*sizeof(int) bytes of dynamic shared memory
    // for GPU_Sum2's staging buffer.
    GPU_Sum2<<<1, n/2, n*sizeof(int)>>>(a2, n);
    hipDeviceSynchronize();
    clock_gettime(CLOCK_REALTIME, &end2); //end timestamp
    clock_gettime(CLOCK_REALTIME, &start3); //start timestamp
    CPU_Sum(a3, n);
    clock_gettime(CLOCK_REALTIME, &end3); //end timestamp
    // NOTE(review): kernel launches and hipDeviceSynchronize() are not
    // error-checked; a failed launch would silently print stale data below.
    printf("\nResult of the GPU (without shared memory) : %d\n", a1[0]);
    printf("Result of the GPU (with shared memory) : %d\n", a2[0]);
    printf("Result of the CPU : %d\n", a3[0]);
    printf("\nTime taken by GPU (no shared memory) is : %lf\n", time_elapsed(&start1, &end1)); //print result for GPU
    printf("Time taken by GPU (with shared memory) is : %lf\n", time_elapsed(&start2, &end2)); //print result for GPU
    printf("Time taken by CPU is : %lf\n", time_elapsed(&start3, &end3)); //print result for CPU
    hipFree(a1);
    hipFree(a2);
    hipFree(a3);
    hipDeviceReset();
    return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z8GPU_Sum1Pii
.globl _Z8GPU_Sum1Pii
.p2align 8
.type _Z8GPU_Sum1Pii,@function
_Z8GPU_Sum1Pii:
s_load_b32 s2, s[0:1], 0x1c
s_waitcnt lgkmcnt(0)
v_cmp_eq_u16_e64 s3, s2, 0
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 vcc_lo, exec_lo, s3
s_cbranch_vccnz .LBB0_5
s_load_b64 s[0:1], s[0:1], 0x0
v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v1, 2, v0
s_and_b32 s2, 0xffff, s2
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v3, s3, s0, v1
v_add_co_ci_u32_e64 v4, null, s1, 0, s3
s_branch .LBB0_3
.p2align 6
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s3
s_lshr_b32 s3, s2, 1
s_cmp_gt_u32 s2, 1
s_mov_b32 s2, s3
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB0_5
.LBB0_3:
s_mov_b32 s3, exec_lo
v_cmpx_gt_u32_e64 s2, v0
s_cbranch_execz .LBB0_2
v_add_nc_u32_e32 v1, s2, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[5:6], 2, v[1:2]
v_add_co_u32 v5, vcc_lo, s0, v5
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v6, vcc_lo, s1, v6, vcc_lo
s_clause 0x1
global_load_b32 v1, v[5:6], off
global_load_b32 v5, v[3:4], off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v5, v1
global_store_b32 v[3:4], v1, off
s_branch .LBB0_2
.LBB0_5:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8GPU_Sum1Pii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 4
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z8GPU_Sum1Pii, .Lfunc_end0-_Z8GPU_Sum1Pii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z8GPU_Sum2Pii
.globl _Z8GPU_Sum2Pii
.p2align 8
.type _Z8GPU_Sum2Pii,@function
_Z8GPU_Sum2Pii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x1c
s_load_b64 s[0:1], s[0:1], 0x0
v_lshlrev_b32_e32 v1, 2, v0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1)
v_add_lshl_u32 v2, v0, s2, 2
s_cmp_eq_u32 s2, 0
s_clause 0x1
global_load_b32 v3, v1, s[0:1]
global_load_b32 v4, v2, s[0:1]
v_add_nc_u32_e32 v1, 0, v1
v_add_nc_u32_e32 v2, 0, v2
s_waitcnt vmcnt(1)
ds_store_b32 v1, v3
s_waitcnt vmcnt(0)
ds_store_b32 v2, v4
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB1_5
.LBB1_1:
s_mov_b32 s2, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB1_3
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0
ds_load_b32 v0, v0
s_waitcnt lgkmcnt(0)
global_store_b32 v1, v0, s[0:1]
.LBB1_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.p2align 6
.LBB1_4:
s_or_b32 exec_lo, exec_lo, s3
s_lshr_b32 s3, s2, 1
s_cmp_gt_u32 s2, 1
s_mov_b32 s2, s3
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB1_1
.LBB1_5:
s_mov_b32 s3, exec_lo
v_cmpx_gt_u32_e64 s2, v0
s_cbranch_execz .LBB1_4
v_add_nc_u32_e32 v2, s2, v0
s_delay_alu instid0(VALU_DEP_1)
v_lshl_add_u32 v2, v2, 2, 0
ds_load_b32 v2, v2
ds_load_b32 v3, v1
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
ds_store_b32 v1, v2
s_branch .LBB1_4
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8GPU_Sum2Pii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 4
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z8GPU_Sum2Pii, .Lfunc_end1-_Z8GPU_Sum2Pii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8GPU_Sum1Pii
.private_segment_fixed_size: 0
.sgpr_count: 6
.sgpr_spill_count: 0
.symbol: _Z8GPU_Sum1Pii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
- .offset: 136
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8GPU_Sum2Pii
.private_segment_fixed_size: 0
.sgpr_count: 4
.sgpr_spill_count: 0
.symbol: _Z8GPU_Sum2Pii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /*
Program to find the sum of all the array elements given that the length of the array is a power of 2.
Benchmarking has been done to compare the performance of the CPU (sequential with extremely powerful cores) and
the GPU (parallel with modestly powerful cores).
*/
/*
Sequential code takes n-1 steps whereas parallel code takes log(n) base 2 steps.
Note: Algorithm can handle only sizes less than or equal to 2^11.
*/
// Importing the required headers
#include<stdio.h>
#include<time.h>
#include<hip/hip_runtime.h>
// Returns the duration from start to end times in sec
// Returns the duration from start to end times in sec
double time_elapsed(struct timespec *start, struct timespec *end)
{
    // Whole-second difference plus the nanosecond remainder scaled to seconds.
    double seconds = (double)(end->tv_sec - start->tv_sec);
    double nanos = (double)(end->tv_nsec - start->tv_nsec) * 0.000000001;
    return seconds + nanos;
}
// GPU Kernel
__global__ void GPU_Sum1(int *a, int len) //GPU code without shared memory.
{
    // In-place pairwise tree reduction in global memory: the block is
    // launched with len/2 threads, and the active range is halved every
    // pass until the whole array has been folded into a[0].
    const int tid = threadIdx.x;
    for (int active = blockDim.x; active > 0; active /= 2)
    {
        if (tid < active)
            a[tid] += a[tid + active];
        __syncthreads(); // barrier is outside the branch: all threads reach it
    }
}
// GPU Kernel
__global__ void GPU_Sum2(int *array, int len) //GPU code with shared memory.
{
    extern __shared__ int a[]; // dynamic shared buffer, sized by the launch config
    const int tid = threadIdx.x;
    const int half = blockDim.x;

    // Stage both halves of the input into shared memory (2*blockDim.x ints).
    a[tid] = array[tid];
    a[tid + half] = array[tid + half];
    __syncthreads(); // staging must complete before the reduction reads it

    // Pairwise tree reduction performed entirely in shared memory.
    for (int active = half; active > 0; active /= 2)
    {
        if (tid < active)
            a[tid] += a[tid + active];
        __syncthreads();
    }

    // Thread 0 publishes the final sum back to global memory.
    if (tid == 0)
        array[0] = a[0];
}
// CPU Function
// CPU Function
void CPU_Sum(int *a, int n)
{
    // Fold the array right-to-left so the running total ends up in a[0].
    int i = n - 1;
    while (i > 0)
    {
        a[i - 1] += a[i];
        i--;
    }
}
// Code execution begins here
// Code execution begins here
int main()
{
    struct timespec start1, end1; //variables to store time for GPU
    struct timespec start2, end2; //variables to store time for GPU
    struct timespec start3, end3; //variables to store time for CPU
    int n; //Length of the array
    printf("Enter the value of n: "); //Get length
    if(scanf("%d", &n) != 1) //reject non-numeric input instead of using garbage
    {
        printf("Invalid input!\n");
        return 0;
    }
    // The reduction assumes a power-of-2 length, and each kernel runs in a
    // single block of n/2 threads, so n is capped at 2*1024 = 2^11.
    if(n < 2 || n > 2048 || (n & (n - 1)) != 0)
    {
        printf("n must be a power of 2 between 2 and 2048!\n");
        return 0;
    }
    int *a1, *a2, *a3;
    if(hipMallocManaged(&a1, n*sizeof(int)) != hipSuccess) //Allocate memory
    {
        printf("Malloc Error!\n");
        return 0;
    }
    if(hipMallocManaged(&a2, n*sizeof(int)) != hipSuccess) //Allocate memory
    {
        printf("Malloc Error!\n");
        hipFree(a1);
        return 0;
    }
    if(hipMallocManaged(&a3, n*sizeof(int)) != hipSuccess) //Allocate memory
    {
        printf("Malloc Error!\n");
        hipFree(a1);
        hipFree(a2);
        return 0;
    }
    for(int i = 0; i < n; i++) //Assign random values
    {
        a1[i] = rand()%10;
        a2[i] = a1[i];
        a3[i] = a2[i];
    }
    clock_gettime(CLOCK_REALTIME, &start1); //start timestamp
    GPU_Sum1<<<1, n/2>>>(a1, n);
    if(hipGetLastError() != hipSuccess) //catch bad launch configuration
        printf("Kernel launch error (GPU_Sum1)!\n");
    hipDeviceSynchronize();
    clock_gettime(CLOCK_REALTIME, &end1); //end timestamp
    clock_gettime(CLOCK_REALTIME, &start2); //start timestamp
    GPU_Sum2<<<1, n/2, n*sizeof(int)>>>(a2, n);
    if(hipGetLastError() != hipSuccess) //catch bad launch configuration
        printf("Kernel launch error (GPU_Sum2)!\n");
    hipDeviceSynchronize();
    clock_gettime(CLOCK_REALTIME, &end2); //end timestamp
    clock_gettime(CLOCK_REALTIME, &start3); //start timestamp
    CPU_Sum(a3, n);
    clock_gettime(CLOCK_REALTIME, &end3); //end timestamp
    printf("\nResult of the GPU (without shared memory) : %d\n", a1[0]);
    printf("Result of the GPU (with shared memory) : %d\n", a2[0]);
    printf("Result of the CPU : %d\n", a3[0]);
    printf("\nTime taken by GPU (no shared memory) is : %lf\n", time_elapsed(&start1, &end1)); //print result for GPU
    printf("Time taken by GPU (with shared memory) is : %lf\n", time_elapsed(&start2, &end2)); //print result for GPU
    printf("Time taken by CPU is : %lf\n", time_elapsed(&start3, &end3)); //print result for CPU
    hipFree(a1);
    hipFree(a2);
    hipFree(a3);
    hipDeviceReset();
    return 0;
}
.file "ArraySum.hip"
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z12time_elapsedP8timespecS0_
.LCPI0_0:
.quad 0x3e112e0be826d695 # double 1.0000000000000001E-9
.text
.globl _Z12time_elapsedP8timespecS0_
.p2align 4, 0x90
.type _Z12time_elapsedP8timespecS0_,@function
_Z12time_elapsedP8timespecS0_: # @_Z12time_elapsedP8timespecS0_
.cfi_startproc
# %bb.0:
movq (%rsi), %rax
movq 8(%rsi), %rcx
subq (%rdi), %rax
cvtsi2sd %rax, %xmm1
subq 8(%rdi), %rcx
cvtsi2sd %rcx, %xmm0
mulsd .LCPI0_0(%rip), %xmm0
addsd %xmm1, %xmm0
retq
.Lfunc_end0:
.size _Z12time_elapsedP8timespecS0_, .Lfunc_end0-_Z12time_elapsedP8timespecS0_
.cfi_endproc
# -- End function
.globl _Z23__device_stub__GPU_Sum1Pii # -- Begin function _Z23__device_stub__GPU_Sum1Pii
.p2align 4, 0x90
.type _Z23__device_stub__GPU_Sum1Pii,@function
_Z23__device_stub__GPU_Sum1Pii: # @_Z23__device_stub__GPU_Sum1Pii
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z8GPU_Sum1Pii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end1:
.size _Z23__device_stub__GPU_Sum1Pii, .Lfunc_end1-_Z23__device_stub__GPU_Sum1Pii
.cfi_endproc
# -- End function
.globl _Z23__device_stub__GPU_Sum2Pii # -- Begin function _Z23__device_stub__GPU_Sum2Pii
.p2align 4, 0x90
.type _Z23__device_stub__GPU_Sum2Pii,@function
_Z23__device_stub__GPU_Sum2Pii: # @_Z23__device_stub__GPU_Sum2Pii
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z8GPU_Sum2Pii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end2:
.size _Z23__device_stub__GPU_Sum2Pii, .Lfunc_end2-_Z23__device_stub__GPU_Sum2Pii
.cfi_endproc
# -- End function
.globl _Z7CPU_SumPii # -- Begin function _Z7CPU_SumPii
.p2align 4, 0x90
.type _Z7CPU_SumPii,@function
_Z7CPU_SumPii: # @_Z7CPU_SumPii
.cfi_startproc
# %bb.0:
cmpl $2, %esi
jl .LBB3_3
# %bb.1: # %.lr.ph.preheader
movl %esi, %eax
movl -4(%rdi,%rax,4), %ecx
incq %rax
.p2align 4, 0x90
.LBB3_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
addl -12(%rdi,%rax,4), %ecx
movl %ecx, -12(%rdi,%rax,4)
decq %rax
cmpq $2, %rax
jg .LBB3_2
.LBB3_3: # %._crit_edge
retq
.Lfunc_end3:
.size _Z7CPU_SumPii, .Lfunc_end3-_Z7CPU_SumPii
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI4_0:
.quad 0x3e112e0be826d695 # double 1.0000000000000001E-9
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $128, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -16
movl $.L.str, %edi
xorl %eax, %eax
callq printf
leaq 4(%rsp), %rsi
movl $.L.str.1, %edi
xorl %eax, %eax
callq __isoc23_scanf
movslq 4(%rsp), %rsi
shlq $2, %rsi
leaq 8(%rsp), %rdi
movl $1, %edx
callq hipMallocManaged
testl %eax, %eax
je .LBB4_2
# %bb.1:
movl $.Lstr.2, %edi
callq puts@PLT
jmp .LBB4_16
.LBB4_2:
movslq 4(%rsp), %rsi
shlq $2, %rsi
leaq 16(%rsp), %rdi
movl $1, %edx
callq hipMallocManaged
testl %eax, %eax
je .LBB4_4
# %bb.3:
movl $.Lstr.2, %edi
callq puts@PLT
movq 8(%rsp), %rdi
callq hipFree
jmp .LBB4_16
.LBB4_4:
movslq 4(%rsp), %rsi
shlq $2, %rsi
leaq 24(%rsp), %rdi
movl $1, %edx
callq hipMallocManaged
testl %eax, %eax
je .LBB4_5
# %bb.17:
movl $.Lstr.2, %edi
callq puts@PLT
movq 8(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
jmp .LBB4_16
.LBB4_5: # %.preheader
cmpl $0, 4(%rsp)
jle .LBB4_8
# %bb.6: # %.lr.ph.preheader
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB4_7: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $34, %rcx
addl %edx, %ecx
addl %ecx, %ecx
leal (%rcx,%rcx,4), %ecx
subl %ecx, %eax
movq 8(%rsp), %rcx
movl %eax, (%rcx,%rbx,4)
movq 16(%rsp), %rcx
movl %eax, (%rcx,%rbx,4)
movq 24(%rsp), %rcx
movl %eax, (%rcx,%rbx,4)
incq %rbx
movslq 4(%rsp), %rax
cmpq %rax, %rbx
jl .LBB4_7
.LBB4_8: # %._crit_edge
movabsq $4294967296, %rbx # imm = 0x100000000
leaq 112(%rsp), %rsi
xorl %edi, %edi
callq clock_gettime
movl 4(%rsp), %eax
movl %eax, %edx
shrl $31, %edx
addl %eax, %edx
sarl %edx
orq %rbx, %rdx
leaq 1(%rbx), %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_10
# %bb.9:
movq 8(%rsp), %rdi
movl 4(%rsp), %esi
callq _Z23__device_stub__GPU_Sum1Pii
.LBB4_10:
callq hipDeviceSynchronize
leaq 96(%rsp), %rsi
xorl %edi, %edi
callq clock_gettime
leaq 80(%rsp), %rsi
xorl %edi, %edi
callq clock_gettime
movslq 4(%rsp), %r8
movl %r8d, %edx
shrl $31, %edx
addl %r8d, %edx
sarl %edx
shlq $2, %r8
orq %rbx, %rdx
incq %rbx
movq %rbx, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_12
# %bb.11:
movq 16(%rsp), %rdi
movl 4(%rsp), %esi
callq _Z23__device_stub__GPU_Sum2Pii
.LBB4_12:
callq hipDeviceSynchronize
leaq 64(%rsp), %rsi
xorl %edi, %edi
callq clock_gettime
leaq 48(%rsp), %rsi
xorl %edi, %edi
callq clock_gettime
movl 4(%rsp), %eax
cmpl $2, %eax
jl .LBB4_15
# %bb.13: # %.lr.ph.preheader.i
movq 24(%rsp), %rcx
movl -4(%rcx,%rax,4), %edx
incq %rax
.p2align 4, 0x90
.LBB4_14: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
addl -12(%rcx,%rax,4), %edx
movl %edx, -12(%rcx,%rax,4)
decq %rax
cmpq $2, %rax
jg .LBB4_14
.LBB4_15: # %_Z7CPU_SumPii.exit
leaq 32(%rsp), %rsi
xorl %edi, %edi
callq clock_gettime
movq 8(%rsp), %rax
movl (%rax), %esi
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
movq 16(%rsp), %rax
movl (%rax), %esi
movl $.L.str.4, %edi
xorl %eax, %eax
callq printf
movq 24(%rsp), %rax
movl (%rax), %esi
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
movq 96(%rsp), %rax
movq 104(%rsp), %rcx
subq 112(%rsp), %rax
cvtsi2sd %rax, %xmm1
subq 120(%rsp), %rcx
cvtsi2sd %rcx, %xmm0
mulsd .LCPI4_0(%rip), %xmm0
addsd %xmm1, %xmm0
movl $.L.str.6, %edi
movb $1, %al
callq printf
movq 64(%rsp), %rax
movq 72(%rsp), %rcx
subq 80(%rsp), %rax
xorps %xmm1, %xmm1
cvtsi2sd %rax, %xmm1
subq 88(%rsp), %rcx
xorps %xmm0, %xmm0
cvtsi2sd %rcx, %xmm0
mulsd .LCPI4_0(%rip), %xmm0
addsd %xmm1, %xmm0
movl $.L.str.7, %edi
movb $1, %al
callq printf
movq 32(%rsp), %rax
movq 40(%rsp), %rcx
subq 48(%rsp), %rax
xorps %xmm1, %xmm1
cvtsi2sd %rax, %xmm1
subq 56(%rsp), %rcx
xorps %xmm0, %xmm0
cvtsi2sd %rcx, %xmm0
mulsd .LCPI4_0(%rip), %xmm0
addsd %xmm1, %xmm0
movl $.L.str.8, %edi
movb $1, %al
callq printf
movq 8(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
callq hipDeviceReset
.LBB4_16:
xorl %eax, %eax
addq $128, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size main, .Lfunc_end4-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB5_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB5_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8GPU_Sum1Pii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8GPU_Sum2Pii, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end5:
.size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB6_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB6_2:
retq
.Lfunc_end6:
.size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z8GPU_Sum1Pii,@object # @_Z8GPU_Sum1Pii
.section .rodata,"a",@progbits
.globl _Z8GPU_Sum1Pii
.p2align 3, 0x0
_Z8GPU_Sum1Pii:
.quad _Z23__device_stub__GPU_Sum1Pii
.size _Z8GPU_Sum1Pii, 8
.type _Z8GPU_Sum2Pii,@object # @_Z8GPU_Sum2Pii
.globl _Z8GPU_Sum2Pii
.p2align 3, 0x0
_Z8GPU_Sum2Pii:
.quad _Z23__device_stub__GPU_Sum2Pii
.size _Z8GPU_Sum2Pii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Enter the value of n: "
.size .L.str, 23
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "%d"
.size .L.str.1, 3
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "\nResult of the GPU (without shared memory) : %d\n"
.size .L.str.3, 49
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Result of the GPU (with shared memory) : %d\n"
.size .L.str.4, 50
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Result of the CPU : %d\n"
.size .L.str.5, 50
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "\nTime taken by GPU (no shared memory) is : %lf\n"
.size .L.str.6, 49
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "Time taken by GPU (with shared memory) is : %lf\n"
.size .L.str.7, 50
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "Time taken by CPU is : %lf\n"
.size .L.str.8, 50
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z8GPU_Sum1Pii"
.size .L__unnamed_1, 15
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z8GPU_Sum2Pii"
.size .L__unnamed_2, 15
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr.2,@object # @str.2
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr.2:
.asciz "Malloc Error!"
.size .Lstr.2, 14
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__GPU_Sum1Pii
.addrsig_sym _Z23__device_stub__GPU_Sum2Pii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8GPU_Sum1Pii
.addrsig_sym _Z8GPU_Sum2Pii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z8GPU_Sum2Pii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R2, R9, R7, c[0x0][0x160] ; /* 0x0000580009027625 */
/* 0x001fcc00078e0207 */
/*0050*/ IMAD.WIDE R4, R7, c[0x0][0x0], R2 ; /* 0x0000000007047a25 */
/* 0x000fe400078e0202 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ee2000c1e1900 */
/*0080*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff067624 */
/* 0x000fe200078e00ff */
/*0090*/ SHF.L.U32 R0, R9.reuse, 0x2, RZ ; /* 0x0000000209007819 */
/* 0x040fe400000006ff */
/*00a0*/ ISETP.NE.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */
/* 0x000fc40003f05270 */
/*00b0*/ ISETP.GE.AND P1, PT, R6, 0x1, PT ; /* 0x000000010600780c */
/* 0x000fe20003f26270 */
/*00c0*/ IMAD R7, R7, c[0x0][0x0], R0 ; /* 0x0000000007077a24 */
/* 0x000fe200078e0200 */
/*00d0*/ STS [R9.X4], R2 ; /* 0x0000000209007388 */
/* 0x0041e80000004800 */
/*00e0*/ STS [R7], R4 ; /* 0x0000000407007388 */
/* 0x0081e80000000800 */
/*00f0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0100*/ @!P1 BRA 0x1c0 ; /* 0x000000b000009947 */
/* 0x000fea0003800000 */
/*0110*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff037624 */
/* 0x001fca00078e00ff */
/*0120*/ ISETP.GE.AND P1, PT, R9, R3, PT ; /* 0x000000030900720c */
/* 0x000fda0003f26270 */
/*0130*/ @!P1 LEA R2, R3, R0, 0x2 ; /* 0x0000000003029211 */
/* 0x000fe200078e10ff */
/*0140*/ @!P1 LDS R4, [R9.X4] ; /* 0x0000000009049984 */
/* 0x000fe20000004800 */
/*0150*/ SHF.R.U32.HI R3, RZ, 0x1, R3 ; /* 0x00000001ff037819 */
/* 0x000fc60000011603 */
/*0160*/ @!P1 LDS R5, [R2] ; /* 0x0000000002059984 */
/* 0x000e240000000800 */
/*0170*/ @!P1 IMAD.IADD R4, R4, 0x1, R5 ; /* 0x0000000104049824 */
/* 0x001fca00078e0205 */
/*0180*/ @!P1 STS [R9.X4], R4 ; /* 0x0000000409009388 */
/* 0x0001e80000004800 */
/*0190*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*01a0*/ ISETP.NE.AND P1, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fda0003f25270 */
/*01b0*/ @P1 BRA 0x120 ; /* 0xffffff6000001947 */
/* 0x001fea000383ffff */
/*01c0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x001fea0003800000 */
/*01d0*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */
/* 0x000e220000000800 */
/*01e0*/ MOV R2, c[0x0][0x160] ; /* 0x0000580000027a02 */
/* 0x000fe20000000f00 */
/*01f0*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff037624 */
/* 0x000fca00078e00ff */
/*0200*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101904 */
/*0210*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0220*/ BRA 0x220; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z8GPU_Sum1Pii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ MOV R0, c[0x0][0x0] ; /* 0x0000000000007a02 */
/* 0x000fc80000000f00 */
/*0020*/ ISETP.GE.AND P0, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fda0003f06270 */
/*0030*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0040*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0050*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0060*/ MOV R7, c[0x0][0x0] ; /* 0x0000000000077a02 */
/* 0x000fe20000000f00 */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0080*/ IMAD.WIDE R2, R6, R3, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0203 */
/*0090*/ ISETP.GE.AND P0, PT, R6, R7, PT ; /* 0x000000070600720c */
/* 0x000fe20003f06270 */
/*00a0*/ BSSY B0, 0x120 ; /* 0x0000007000007945 */
/* 0x000fd80003800000 */
/*00b0*/ @P0 BRA 0x110 ; /* 0x0000005000000947 */
/* 0x001fea0003800000 */
/*00c0*/ IMAD.WIDE R4, R7, 0x4, R2 ; /* 0x0000000407047825 */
/* 0x000fe200078e0202 */
/*00d0*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000eaa000c1e1900 */
/*00e0*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea4000c1e1900 */
/*00f0*/ IMAD.IADD R9, R0, 0x1, R5 ; /* 0x0000000100097824 */
/* 0x004fca00078e0205 */
/*0100*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */
/* 0x0001e4000c101904 */
/*0110*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0120*/ SHF.R.U32.HI R7, RZ, 0x1, R7 ; /* 0x00000001ff077819 */
/* 0x000fe20000011607 */
/*0130*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe60000010000 */
/*0140*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fda0003f05270 */
/*0150*/ @P0 BRA 0x90 ; /* 0xffffff3000000947 */
/* 0x000fea000383ffff */
/*0160*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0170*/ BRA 0x170; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z8GPU_Sum1Pii
.globl _Z8GPU_Sum1Pii
.p2align 8
.type _Z8GPU_Sum1Pii,@function
_Z8GPU_Sum1Pii:
s_load_b32 s2, s[0:1], 0x1c
s_waitcnt lgkmcnt(0)
v_cmp_eq_u16_e64 s3, s2, 0
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 vcc_lo, exec_lo, s3
s_cbranch_vccnz .LBB0_5
s_load_b64 s[0:1], s[0:1], 0x0
v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v1, 2, v0
s_and_b32 s2, 0xffff, s2
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v3, s3, s0, v1
v_add_co_ci_u32_e64 v4, null, s1, 0, s3
s_branch .LBB0_3
.p2align 6
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s3
s_lshr_b32 s3, s2, 1
s_cmp_gt_u32 s2, 1
s_mov_b32 s2, s3
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB0_5
.LBB0_3:
s_mov_b32 s3, exec_lo
v_cmpx_gt_u32_e64 s2, v0
s_cbranch_execz .LBB0_2
v_add_nc_u32_e32 v1, s2, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[5:6], 2, v[1:2]
v_add_co_u32 v5, vcc_lo, s0, v5
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v6, vcc_lo, s1, v6, vcc_lo
s_clause 0x1
global_load_b32 v1, v[5:6], off
global_load_b32 v5, v[3:4], off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v5, v1
global_store_b32 v[3:4], v1, off
s_branch .LBB0_2
.LBB0_5:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8GPU_Sum1Pii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 4
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z8GPU_Sum1Pii, .Lfunc_end0-_Z8GPU_Sum1Pii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z8GPU_Sum2Pii
.globl _Z8GPU_Sum2Pii
.p2align 8
.type _Z8GPU_Sum2Pii,@function
_Z8GPU_Sum2Pii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x1c
s_load_b64 s[0:1], s[0:1], 0x0
v_lshlrev_b32_e32 v1, 2, v0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1)
v_add_lshl_u32 v2, v0, s2, 2
s_cmp_eq_u32 s2, 0
s_clause 0x1
global_load_b32 v3, v1, s[0:1]
global_load_b32 v4, v2, s[0:1]
v_add_nc_u32_e32 v1, 0, v1
v_add_nc_u32_e32 v2, 0, v2
s_waitcnt vmcnt(1)
ds_store_b32 v1, v3
s_waitcnt vmcnt(0)
ds_store_b32 v2, v4
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB1_5
.LBB1_1:
s_mov_b32 s2, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB1_3
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0
ds_load_b32 v0, v0
s_waitcnt lgkmcnt(0)
global_store_b32 v1, v0, s[0:1]
.LBB1_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.p2align 6
.LBB1_4:
s_or_b32 exec_lo, exec_lo, s3
s_lshr_b32 s3, s2, 1
s_cmp_gt_u32 s2, 1
s_mov_b32 s2, s3
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB1_1
.LBB1_5:
s_mov_b32 s3, exec_lo
v_cmpx_gt_u32_e64 s2, v0
s_cbranch_execz .LBB1_4
v_add_nc_u32_e32 v2, s2, v0
s_delay_alu instid0(VALU_DEP_1)
v_lshl_add_u32 v2, v2, 2, 0
ds_load_b32 v2, v2
ds_load_b32 v3, v1
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
ds_store_b32 v1, v2
s_branch .LBB1_4
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8GPU_Sum2Pii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 4
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z8GPU_Sum2Pii, .Lfunc_end1-_Z8GPU_Sum2Pii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8GPU_Sum1Pii
.private_segment_fixed_size: 0
.sgpr_count: 6
.sgpr_spill_count: 0
.symbol: _Z8GPU_Sum1Pii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
- .offset: 136
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8GPU_Sum2Pii
.private_segment_fixed_size: 0
.sgpr_count: 4
.sgpr_spill_count: 0
.symbol: _Z8GPU_Sum2Pii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000685d4_00000000-6_ArraySum.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2062:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2062:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z12time_elapsedP8timespecS0_
.type _Z12time_elapsedP8timespecS0_, @function
_Z12time_elapsedP8timespecS0_:
.LFB2057:
.cfi_startproc
endbr64
movq (%rsi), %rax
subq (%rdi), %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
movq 8(%rsi), %rax
subq 8(%rdi), %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
mulsd .LC0(%rip), %xmm0
addsd %xmm1, %xmm0
ret
.cfi_endproc
.LFE2057:
.size _Z12time_elapsedP8timespecS0_, .-_Z12time_elapsedP8timespecS0_
.globl _Z7CPU_SumPii
.type _Z7CPU_SumPii, @function
_Z7CPU_SumPii:
.LFB2058:
.cfi_startproc
endbr64
leal -1(%rsi), %eax
testl %eax, %eax
jle .L4
cltq
leaq -4(%rdi,%rax,4), %rax
movslq %esi, %rdx
leaq -12(%rdi,%rdx,4), %rcx
leal -2(%rsi), %edx
salq $2, %rdx
subq %rdx, %rcx
.L6:
movl 4(%rax), %edx
addl %edx, (%rax)
subq $4, %rax
cmpq %rcx, %rax
jne .L6
.L4:
ret
.cfi_endproc
.LFE2058:
.size _Z7CPU_SumPii, .-_Z7CPU_SumPii
.globl _Z28__device_stub__Z8GPU_Sum1PiiPii
.type _Z28__device_stub__Z8GPU_Sum1PiiPii, @function
_Z28__device_stub__Z8GPU_Sum1PiiPii:
.LFB2084:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L12
.L8:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L13
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L12:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z8GPU_Sum1Pii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L8
.L13:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2084:
.size _Z28__device_stub__Z8GPU_Sum1PiiPii, .-_Z28__device_stub__Z8GPU_Sum1PiiPii
.globl _Z8GPU_Sum1Pii
.type _Z8GPU_Sum1Pii, @function
_Z8GPU_Sum1Pii:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z28__device_stub__Z8GPU_Sum1PiiPii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _Z8GPU_Sum1Pii, .-_Z8GPU_Sum1Pii
.globl _Z28__device_stub__Z8GPU_Sum2PiiPii
.type _Z28__device_stub__Z8GPU_Sum2PiiPii, @function
_Z28__device_stub__Z8GPU_Sum2PiiPii:
.LFB2086:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L20
.L16:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L21
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z8GPU_Sum2Pii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L16
.L21:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2086:
.size _Z28__device_stub__Z8GPU_Sum2PiiPii, .-_Z28__device_stub__Z8GPU_Sum2PiiPii
.globl _Z8GPU_Sum2Pii
.type _Z8GPU_Sum2Pii, @function
_Z8GPU_Sum2Pii:
.LFB2087:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z28__device_stub__Z8GPU_Sum2PiiPii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _Z8GPU_Sum2Pii, .-_Z8GPU_Sum2Pii
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "Enter the value of n: "
.LC2:
.string "%d"
.LC3:
.string "Malloc Error!\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC4:
.string "\nResult of the GPU (without shared memory) : %d\n"
.align 8
.LC5:
.string "Result of the GPU (with shared memory) : %d\n"
.align 8
.LC6:
.string "Result of the CPU : %d\n"
.align 8
.LC7:
.string "\nTime taken by GPU (no shared memory) is : %lf\n"
.align 8
.LC8:
.string "Time taken by GPU (with shared memory) is : %lf\n"
.align 8
.LC9:
.string "Time taken by CPU is : %lf\n"
.text
.globl main
.type main, @function
main:
.LFB2059:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $144, %rsp
.cfi_def_cfa_offset 176
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq .LC1(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
leaq 4(%rsp), %rsi
leaq .LC2(%rip), %rdi
movl $0, %eax
call __isoc23_scanf@PLT
movslq 4(%rsp), %rsi
salq $2, %rsi
leaq 8(%rsp), %rdi
movl $1, %edx
call cudaMallocManaged@PLT
testl %eax, %eax
jne .L38
movslq 4(%rsp), %rsi
salq $2, %rsi
leaq 16(%rsp), %rdi
movl $1, %edx
call cudaMallocManaged@PLT
testl %eax, %eax
jne .L39
movslq 4(%rsp), %rsi
salq $2, %rsi
leaq 24(%rsp), %rdi
movl $1, %edx
call cudaMallocManaged@PLT
testl %eax, %eax
jne .L28
movl $0, %ebx
cmpl $0, 4(%rsp)
jle .L30
.L29:
call rand@PLT
movslq %eax, %rdx
imulq $1717986919, %rdx, %rdx
sarq $34, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,4), %edx
addl %edx, %edx
subl %edx, %eax
movq 8(%rsp), %rdx
movl %eax, (%rdx,%rbx,4)
movq 8(%rsp), %rax
movl (%rax,%rbx,4), %edx
movq 16(%rsp), %rax
movl %edx, (%rax,%rbx,4)
movq 16(%rsp), %rax
movl (%rax,%rbx,4), %edx
movq 24(%rsp), %rax
movl %edx, (%rax,%rbx,4)
addq $1, %rbx
cmpl %ebx, 4(%rsp)
jg .L29
.L30:
leaq 32(%rsp), %rsi
movl $0, %edi
call clock_gettime@PLT
movl 4(%rsp), %eax
movl $2, %ecx
cltd
idivl %ecx
movl %eax, 112(%rsp)
movl $1, 116(%rsp)
movl $1, 120(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $1, 104(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 112(%rsp), %rdx
movl $1, %ecx
movq 96(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L40
.L31:
call cudaDeviceSynchronize@PLT
leaq 48(%rsp), %rsi
movl $0, %edi
call clock_gettime@PLT
leaq 64(%rsp), %rsi
movl $0, %edi
call clock_gettime@PLT
movl 4(%rsp), %ecx
movl $2, %esi
movl %ecx, %eax
cltd
idivl %esi
movl %eax, 112(%rsp)
movl $1, 116(%rsp)
movl $1, 120(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $1, 104(%rsp)
movslq %ecx, %rcx
movl $0, %r9d
leaq 0(,%rcx,4), %r8
movq 112(%rsp), %rdx
movl $1, %ecx
movq 96(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L41
.L32:
call cudaDeviceSynchronize@PLT
leaq 80(%rsp), %r12
movq %r12, %rsi
movl $0, %edi
call clock_gettime@PLT
leaq 96(%rsp), %rbx
movq %rbx, %rsi
movl $0, %edi
call clock_gettime@PLT
movl 4(%rsp), %esi
movq 24(%rsp), %rdi
call _Z7CPU_SumPii
leaq 112(%rsp), %rbp
movq %rbp, %rsi
movl $0, %edi
call clock_gettime@PLT
movq 8(%rsp), %rax
movl (%rax), %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 16(%rsp), %rax
movl (%rax), %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 24(%rsp), %rax
movl (%rax), %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 48(%rsp), %rsi
leaq 32(%rsp), %rdi
call _Z12time_elapsedP8timespecS0_
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
leaq 64(%rsp), %rdi
movq %r12, %rsi
call _Z12time_elapsedP8timespecS0_
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq %rbp, %rsi
movq %rbx, %rdi
call _Z12time_elapsedP8timespecS0_
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
call cudaDeviceReset@PLT
.L26:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L42
movl $0, %eax
addq $144, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L38:
.cfi_restore_state
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L26
.L39:
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
jmp .L26
.L28:
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
jmp .L26
.L40:
movl 4(%rsp), %esi
movq 8(%rsp), %rdi
call _Z28__device_stub__Z8GPU_Sum1PiiPii
jmp .L31
.L41:
movl 4(%rsp), %esi
movq 16(%rsp), %rdi
call _Z28__device_stub__Z8GPU_Sum2PiiPii
jmp .L32
.L42:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2059:
.size main, .-main
.section .rodata.str1.1
.LC10:
.string "_Z8GPU_Sum2Pii"
.LC11:
.string "_Z8GPU_Sum1Pii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2089:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _Z8GPU_Sum2Pii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _Z8GPU_Sum1Pii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2089:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long -400107883
.long 1041313291
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "ArraySum.hip"
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z12time_elapsedP8timespecS0_
.LCPI0_0:
.quad 0x3e112e0be826d695 # double 1.0000000000000001E-9
.text
.globl _Z12time_elapsedP8timespecS0_
.p2align 4, 0x90
.type _Z12time_elapsedP8timespecS0_,@function
_Z12time_elapsedP8timespecS0_: # @_Z12time_elapsedP8timespecS0_
.cfi_startproc
# %bb.0:
movq (%rsi), %rax
movq 8(%rsi), %rcx
subq (%rdi), %rax
cvtsi2sd %rax, %xmm1
subq 8(%rdi), %rcx
cvtsi2sd %rcx, %xmm0
mulsd .LCPI0_0(%rip), %xmm0
addsd %xmm1, %xmm0
retq
.Lfunc_end0:
.size _Z12time_elapsedP8timespecS0_, .Lfunc_end0-_Z12time_elapsedP8timespecS0_
.cfi_endproc
# -- End function
.globl _Z23__device_stub__GPU_Sum1Pii # -- Begin function _Z23__device_stub__GPU_Sum1Pii
.p2align 4, 0x90
.type _Z23__device_stub__GPU_Sum1Pii,@function
_Z23__device_stub__GPU_Sum1Pii: # @_Z23__device_stub__GPU_Sum1Pii
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z8GPU_Sum1Pii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end1:
.size _Z23__device_stub__GPU_Sum1Pii, .Lfunc_end1-_Z23__device_stub__GPU_Sum1Pii
.cfi_endproc
# -- End function
.globl _Z23__device_stub__GPU_Sum2Pii # -- Begin function _Z23__device_stub__GPU_Sum2Pii
.p2align 4, 0x90
.type _Z23__device_stub__GPU_Sum2Pii,@function
_Z23__device_stub__GPU_Sum2Pii: # @_Z23__device_stub__GPU_Sum2Pii
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z8GPU_Sum2Pii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end2:
.size _Z23__device_stub__GPU_Sum2Pii, .Lfunc_end2-_Z23__device_stub__GPU_Sum2Pii
.cfi_endproc
# -- End function
.globl _Z7CPU_SumPii # -- Begin function _Z7CPU_SumPii
.p2align 4, 0x90
.type _Z7CPU_SumPii,@function
_Z7CPU_SumPii: # @_Z7CPU_SumPii
.cfi_startproc
# %bb.0:
cmpl $2, %esi
jl .LBB3_3
# %bb.1: # %.lr.ph.preheader
movl %esi, %eax
movl -4(%rdi,%rax,4), %ecx
incq %rax
.p2align 4, 0x90
.LBB3_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
addl -12(%rdi,%rax,4), %ecx
movl %ecx, -12(%rdi,%rax,4)
decq %rax
cmpq $2, %rax
jg .LBB3_2
.LBB3_3: # %._crit_edge
retq
.Lfunc_end3:
.size _Z7CPU_SumPii, .Lfunc_end3-_Z7CPU_SumPii
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI4_0:
.quad 0x3e112e0be826d695 # double 1.0000000000000001E-9
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $128, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -16
movl $.L.str, %edi
xorl %eax, %eax
callq printf
leaq 4(%rsp), %rsi
movl $.L.str.1, %edi
xorl %eax, %eax
callq __isoc23_scanf
movslq 4(%rsp), %rsi
shlq $2, %rsi
leaq 8(%rsp), %rdi
movl $1, %edx
callq hipMallocManaged
testl %eax, %eax
je .LBB4_2
# %bb.1:
movl $.Lstr.2, %edi
callq puts@PLT
jmp .LBB4_16
.LBB4_2:
movslq 4(%rsp), %rsi
shlq $2, %rsi
leaq 16(%rsp), %rdi
movl $1, %edx
callq hipMallocManaged
testl %eax, %eax
je .LBB4_4
# %bb.3:
movl $.Lstr.2, %edi
callq puts@PLT
movq 8(%rsp), %rdi
callq hipFree
jmp .LBB4_16
.LBB4_4:
movslq 4(%rsp), %rsi
shlq $2, %rsi
leaq 24(%rsp), %rdi
movl $1, %edx
callq hipMallocManaged
testl %eax, %eax
je .LBB4_5
# %bb.17:
movl $.Lstr.2, %edi
callq puts@PLT
movq 8(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
jmp .LBB4_16
.LBB4_5: # %.preheader
cmpl $0, 4(%rsp)
jle .LBB4_8
# %bb.6: # %.lr.ph.preheader
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB4_7: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $34, %rcx
addl %edx, %ecx
addl %ecx, %ecx
leal (%rcx,%rcx,4), %ecx
subl %ecx, %eax
movq 8(%rsp), %rcx
movl %eax, (%rcx,%rbx,4)
movq 16(%rsp), %rcx
movl %eax, (%rcx,%rbx,4)
movq 24(%rsp), %rcx
movl %eax, (%rcx,%rbx,4)
incq %rbx
movslq 4(%rsp), %rax
cmpq %rax, %rbx
jl .LBB4_7
.LBB4_8: # %._crit_edge
movabsq $4294967296, %rbx # imm = 0x100000000
leaq 112(%rsp), %rsi
xorl %edi, %edi
callq clock_gettime
movl 4(%rsp), %eax
movl %eax, %edx
shrl $31, %edx
addl %eax, %edx
sarl %edx
orq %rbx, %rdx
leaq 1(%rbx), %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_10
# %bb.9:
movq 8(%rsp), %rdi
movl 4(%rsp), %esi
callq _Z23__device_stub__GPU_Sum1Pii
.LBB4_10:
callq hipDeviceSynchronize
leaq 96(%rsp), %rsi
xorl %edi, %edi
callq clock_gettime
leaq 80(%rsp), %rsi
xorl %edi, %edi
callq clock_gettime
movslq 4(%rsp), %r8
movl %r8d, %edx
shrl $31, %edx
addl %r8d, %edx
sarl %edx
shlq $2, %r8
orq %rbx, %rdx
incq %rbx
movq %rbx, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_12
# %bb.11:
movq 16(%rsp), %rdi
movl 4(%rsp), %esi
callq _Z23__device_stub__GPU_Sum2Pii
.LBB4_12:
callq hipDeviceSynchronize
leaq 64(%rsp), %rsi
xorl %edi, %edi
callq clock_gettime
leaq 48(%rsp), %rsi
xorl %edi, %edi
callq clock_gettime
movl 4(%rsp), %eax
cmpl $2, %eax
jl .LBB4_15
# %bb.13: # %.lr.ph.preheader.i
movq 24(%rsp), %rcx
movl -4(%rcx,%rax,4), %edx
incq %rax
.p2align 4, 0x90
.LBB4_14: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
addl -12(%rcx,%rax,4), %edx
movl %edx, -12(%rcx,%rax,4)
decq %rax
cmpq $2, %rax
jg .LBB4_14
.LBB4_15: # %_Z7CPU_SumPii.exit
leaq 32(%rsp), %rsi
xorl %edi, %edi
callq clock_gettime
movq 8(%rsp), %rax
movl (%rax), %esi
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
movq 16(%rsp), %rax
movl (%rax), %esi
movl $.L.str.4, %edi
xorl %eax, %eax
callq printf
movq 24(%rsp), %rax
movl (%rax), %esi
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
movq 96(%rsp), %rax
movq 104(%rsp), %rcx
subq 112(%rsp), %rax
cvtsi2sd %rax, %xmm1
subq 120(%rsp), %rcx
cvtsi2sd %rcx, %xmm0
mulsd .LCPI4_0(%rip), %xmm0
addsd %xmm1, %xmm0
movl $.L.str.6, %edi
movb $1, %al
callq printf
movq 64(%rsp), %rax
movq 72(%rsp), %rcx
subq 80(%rsp), %rax
xorps %xmm1, %xmm1
cvtsi2sd %rax, %xmm1
subq 88(%rsp), %rcx
xorps %xmm0, %xmm0
cvtsi2sd %rcx, %xmm0
mulsd .LCPI4_0(%rip), %xmm0
addsd %xmm1, %xmm0
movl $.L.str.7, %edi
movb $1, %al
callq printf
movq 32(%rsp), %rax
movq 40(%rsp), %rcx
subq 48(%rsp), %rax
xorps %xmm1, %xmm1
cvtsi2sd %rax, %xmm1
subq 56(%rsp), %rcx
xorps %xmm0, %xmm0
cvtsi2sd %rcx, %xmm0
mulsd .LCPI4_0(%rip), %xmm0
addsd %xmm1, %xmm0
movl $.L.str.8, %edi
movb $1, %al
callq printf
movq 8(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
callq hipDeviceReset
.LBB4_16:
xorl %eax, %eax
addq $128, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size main, .Lfunc_end4-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB5_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB5_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8GPU_Sum1Pii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8GPU_Sum2Pii, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end5:
.size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB6_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB6_2:
retq
.Lfunc_end6:
.size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z8GPU_Sum1Pii,@object # @_Z8GPU_Sum1Pii
.section .rodata,"a",@progbits
.globl _Z8GPU_Sum1Pii
.p2align 3, 0x0
_Z8GPU_Sum1Pii:
.quad _Z23__device_stub__GPU_Sum1Pii
.size _Z8GPU_Sum1Pii, 8
.type _Z8GPU_Sum2Pii,@object # @_Z8GPU_Sum2Pii
.globl _Z8GPU_Sum2Pii
.p2align 3, 0x0
_Z8GPU_Sum2Pii:
.quad _Z23__device_stub__GPU_Sum2Pii
.size _Z8GPU_Sum2Pii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Enter the value of n: "
.size .L.str, 23
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "%d"
.size .L.str.1, 3
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "\nResult of the GPU (without shared memory) : %d\n"
.size .L.str.3, 49
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Result of the GPU (with shared memory) : %d\n"
.size .L.str.4, 50
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Result of the CPU : %d\n"
.size .L.str.5, 50
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "\nTime taken by GPU (no shared memory) is : %lf\n"
.size .L.str.6, 49
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "Time taken by GPU (with shared memory) is : %lf\n"
.size .L.str.7, 50
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "Time taken by CPU is : %lf\n"
.size .L.str.8, 50
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z8GPU_Sum1Pii"
.size .L__unnamed_1, 15
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z8GPU_Sum2Pii"
.size .L__unnamed_2, 15
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr.2,@object # @str.2
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr.2:
.asciz "Malloc Error!"
.size .Lstr.2, 14
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__GPU_Sum1Pii
.addrsig_sym _Z23__device_stub__GPU_Sum2Pii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8GPU_Sum1Pii
.addrsig_sym _Z8GPU_Sum2Pii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <iostream>
#include <cuda.h>
#include <time.h>
#include <math.h>
#define row 100
#define col 100
void handle_error(cudaError_t error) {
if (error != cudaSuccess) {
std::cout << "Error in cuda. Waiting...";
exit(0);
}
}
int get_rand_in_range() {
return rand()%256;
}
__global__ void convert(float *input_image, float *output_image, int no_of_threads) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
for(int i = index; i <= (row*col); i = i + (blockDim.x*no_of_threads)) {
float r = input_image[3*i];
float g = input_image[3*i + 1];
float b = input_image[3*i + 2] ;
output_image[index] = (0.21*r + 0.71*g + 0.07*b);
}
}
void initialise_matrix(float A[], int m, int n) {
for(int i = 0; i < m; i++) {
for(int j = 0; j < n; j++) {
for(int k = 0; k < 3; k++ ) {
A[(i*n + j)*3 + k] = get_rand_in_range();
}
}
}
}
int main() {
srand(time(NULL));
float image[row * col * 3], gray_image[row * col];
float *device_image, *output_image;
handle_error(cudaMalloc((void **)&device_image, row * col * 3 * sizeof(float)));
handle_error(cudaMalloc((void **)&output_image, row * col * sizeof(float)));
initialise_matrix(image, row, col);
cudaMemcpy(device_image, image, row * col * 3 * sizeof(float), cudaMemcpyHostToDevice);
dim3 grid_dim(256,1,1);
dim3 block_dim(256,1,1);
convert<<<grid_dim, block_dim>>>(device_image, output_image, 256);
cudaMemcpy(gray_image, output_image, row * col * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(device_image);
cudaFree(output_image);
return 0;
} | code for sm_80
Function : _Z7convertPfS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GT.AND P0, PT, R0, 0x2710, PT ; /* 0x000027100000780c */
/* 0x000fda0003f04270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R15, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0f7435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R0, R15, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fc800078e020f */
/*0090*/ IMAD R4, R0, 0x3, RZ ; /* 0x0000000300047824 */
/* 0x000fc800078e02ff */
/*00a0*/ IMAD.WIDE R4, R4, R15, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fca00078e020f */
/*00b0*/ LDG.E R13, [R4.64+0x4] ; /* 0x00000404040d7981 */
/* 0x000ea8000c1e1900 */
/*00c0*/ LDG.E R12, [R4.64] ; /* 0x00000004040c7981 */
/* 0x000ee8000c1e1900 */
/*00d0*/ LDG.E R10, [R4.64+0x8] ; /* 0x00000804040a7981 */
/* 0x000f22000c1e1900 */
/*00e0*/ F2F.F64.F32 R8, R13 ; /* 0x0000000d00087310 */
/* 0x005e300000201800 */
/*00f0*/ F2F.F64.F32 R6, R12 ; /* 0x0000000c00067310 */
/* 0x008e700000201800 */
/*0100*/ F2F.F64.F32 R10, R10 ; /* 0x0000000a000a7310 */
/* 0x010ea20000201800 */
/*0110*/ DMUL R8, R8, c[0x2][0x0] ; /* 0x0080000008087a28 */
/* 0x001e4c0000000000 */
/*0120*/ DFMA R6, R6, c[0x2][0x8], R8 ; /* 0x0080020006067a2b */
/* 0x0020a40000000008 */
/*0130*/ MOV R9, c[0x0][0x0] ; /* 0x0000000000097a02 */
/* 0x001fca0000000f00 */
/*0140*/ IMAD R0, R9, c[0x0][0x170], R0 ; /* 0x00005c0009007a24 */
/* 0x000fe200078e0200 */
/*0150*/ DFMA R6, R10, c[0x2][0x10], R6 ; /* 0x008004000a067a2b */
/* 0x004e080000000006 */
/*0160*/ ISETP.GE.AND P0, PT, R0, 0x2711, PT ; /* 0x000027110000780c */
/* 0x000fcc0003f06270 */
/*0170*/ F2F.F32.F64 R7, R6 ; /* 0x0000000600077310 */
/* 0x001e240000301000 */
/*0180*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x0011ea000c101904 */
/*0190*/ @!P0 BRA 0x90 ; /* 0xfffffef000008947 */
/* 0x000fea000383ffff */
/*01a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01b0*/ BRA 0x1b0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <iostream>
#include <cuda.h>
#include <time.h>
#include <math.h>
#define row 100
#define col 100
void handle_error(cudaError_t error) {
if (error != cudaSuccess) {
std::cout << "Error in cuda. Waiting...";
exit(0);
}
}
int get_rand_in_range() {
return rand()%256;
}
__global__ void convert(float *input_image, float *output_image, int no_of_threads) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
for(int i = index; i <= (row*col); i = i + (blockDim.x*no_of_threads)) {
float r = input_image[3*i];
float g = input_image[3*i + 1];
float b = input_image[3*i + 2] ;
output_image[index] = (0.21*r + 0.71*g + 0.07*b);
}
}
void initialise_matrix(float A[], int m, int n) {
for(int i = 0; i < m; i++) {
for(int j = 0; j < n; j++) {
for(int k = 0; k < 3; k++ ) {
A[(i*n + j)*3 + k] = get_rand_in_range();
}
}
}
}
int main() {
srand(time(NULL));
float image[row * col * 3], gray_image[row * col];
float *device_image, *output_image;
handle_error(cudaMalloc((void **)&device_image, row * col * 3 * sizeof(float)));
handle_error(cudaMalloc((void **)&output_image, row * col * sizeof(float)));
initialise_matrix(image, row, col);
cudaMemcpy(device_image, image, row * col * 3 * sizeof(float), cudaMemcpyHostToDevice);
dim3 grid_dim(256,1,1);
dim3 block_dim(256,1,1);
convert<<<grid_dim, block_dim>>>(device_image, output_image, 256);
cudaMemcpy(gray_image, output_image, row * col * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(device_image);
cudaFree(output_image);
return 0;
} | .file "tmpxft_00063fea_00000000-6_prog5.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3675:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3675:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Error in cuda. Waiting..."
.text
.globl _Z12handle_error9cudaError
.type _Z12handle_error9cudaError, @function
_Z12handle_error9cudaError:
.LFB3669:
.cfi_startproc
endbr64
testl %edi, %edi
jne .L8
ret
.L8:
subq $8, %rsp
.cfi_def_cfa_offset 16
movl $25, %edx
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl $0, %edi
call exit@PLT
.cfi_endproc
.LFE3669:
.size _Z12handle_error9cudaError, .-_Z12handle_error9cudaError
.globl _Z17get_rand_in_rangev
.type _Z17get_rand_in_rangev, @function
_Z17get_rand_in_rangev:
.LFB3670:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call rand@PLT
cltd
shrl $24, %edx
addl %edx, %eax
movzbl %al, %eax
subl %edx, %eax
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3670:
.size _Z17get_rand_in_rangev, .-_Z17get_rand_in_rangev
.globl _Z17initialise_matrixPfii
.type _Z17initialise_matrixPfii, @function
_Z17initialise_matrixPfii:
.LFB3671:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 24(%rsp)
movl %esi, 16(%rsp)
movl %edx, 12(%rsp)
testl %esi, %esi
jle .L11
leal (%rdx,%rdx,2), %eax
movl %eax, 20(%rsp)
movl %eax, %r13d
movl $0, %r14d
movl $0, %r15d
jmp .L13
.L19:
addl $3, %r12d
addq $12, %rbp
cmpl %r13d, %r12d
je .L15
.L16:
movl $0, %ebx
.L14:
call _Z17get_rand_in_rangev
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, 0(%rbp,%rbx,4)
addq $1, %rbx
cmpq $3, %rbx
jne .L14
jmp .L19
.L15:
addl $1, %r15d
movl 20(%rsp), %eax
addl %eax, %r13d
movl 12(%rsp), %eax
addl %eax, %r14d
cmpl %r15d, 16(%rsp)
je .L11
.L13:
cmpl $0, 12(%rsp)
jle .L15
leal (%r14,%r14,2), %r12d
movslq %r14d, %rax
leaq (%rax,%rax,2), %rax
movq 24(%rsp), %rcx
leaq (%rcx,%rax,4), %rbp
jmp .L16
.L11:
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3671:
.size _Z17initialise_matrixPfii, .-_Z17initialise_matrixPfii
.globl _Z29__device_stub__Z7convertPfS_iPfS_i
.type _Z29__device_stub__Z7convertPfS_iPfS_i, @function
_Z29__device_stub__Z7convertPfS_iPfS_i:
.LFB3697:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L24
.L20:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L25
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L24:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z7convertPfS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L20
.L25:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3697:
.size _Z29__device_stub__Z7convertPfS_iPfS_i, .-_Z29__device_stub__Z7convertPfS_iPfS_i
.globl _Z7convertPfS_i
.type _Z7convertPfS_i, @function
_Z7convertPfS_i:
.LFB3698:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z7convertPfS_iPfS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3698:
.size _Z7convertPfS_i, .-_Z7convertPfS_i
.globl main
.type main, @function
main:
.LFB3672:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq -159744(%rsp), %r11
.cfi_def_cfa 11, 159760
.LPSRL0:
subq $4096, %rsp
orq $0, (%rsp)
cmpq %r11, %rsp
jne .LPSRL0
.cfi_def_cfa_register 7
subq $320, %rsp
.cfi_def_cfa_offset 160080
movq %fs:40, %rax
movq %rax, 160056(%rsp)
xorl %eax, %eax
movl $0, %edi
call time@PLT
movl %eax, %edi
call srand@PLT
leaq 8(%rsp), %rdi
movl $120000, %esi
call cudaMalloc@PLT
movl %eax, %edi
call _Z12handle_error9cudaError
leaq 16(%rsp), %rdi
movl $40000, %esi
call cudaMalloc@PLT
movl %eax, %edi
call _Z12handle_error9cudaError
leaq 40048(%rsp), %rbx
movl $100, %edx
movl $100, %esi
movq %rbx, %rdi
call _Z17initialise_matrixPfii
movl $1, %ecx
movl $120000, %edx
movq %rbx, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $256, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $256, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L32
.L29:
leaq 48(%rsp), %rdi
movl $2, %ecx
movl $40000, %edx
movq 16(%rsp), %rsi
call cudaMemcpy@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 160056(%rsp), %rax
subq %fs:40, %rax
jne .L33
movl $0, %eax
addq $160064, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L32:
.cfi_restore_state
movl $256, %edx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z29__device_stub__Z7convertPfS_iPfS_i
jmp .L29
.L33:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3672:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z7convertPfS_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3700:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z7convertPfS_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3700:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
#include <cuda.h>
#include <time.h>
#include <math.h>
#define row 100
#define col 100
void handle_error(cudaError_t error) {
if (error != cudaSuccess) {
std::cout << "Error in cuda. Waiting...";
exit(0);
}
}
int get_rand_in_range() {
return rand()%256;
}
__global__ void convert(float *input_image, float *output_image, int no_of_threads) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
for(int i = index; i <= (row*col); i = i + (blockDim.x*no_of_threads)) {
float r = input_image[3*i];
float g = input_image[3*i + 1];
float b = input_image[3*i + 2] ;
output_image[index] = (0.21*r + 0.71*g + 0.07*b);
}
}
void initialise_matrix(float A[], int m, int n) {
for(int i = 0; i < m; i++) {
for(int j = 0; j < n; j++) {
for(int k = 0; k < 3; k++ ) {
A[(i*n + j)*3 + k] = get_rand_in_range();
}
}
}
}
int main() {
srand(time(NULL));
float image[row * col * 3], gray_image[row * col];
float *device_image, *output_image;
handle_error(cudaMalloc((void **)&device_image, row * col * 3 * sizeof(float)));
handle_error(cudaMalloc((void **)&output_image, row * col * sizeof(float)));
initialise_matrix(image, row, col);
cudaMemcpy(device_image, image, row * col * 3 * sizeof(float), cudaMemcpyHostToDevice);
dim3 grid_dim(256,1,1);
dim3 block_dim(256,1,1);
convert<<<grid_dim, block_dim>>>(device_image, output_image, 256);
cudaMemcpy(gray_image, output_image, row * col * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(device_image);
cudaFree(output_image);
return 0;
} | #include <iostream>
#include <hip/hip_runtime.h>
#include <time.h>
#include <math.h>
#define row 100
#define col 100
void handle_error(hipError_t error) {
if (error != hipSuccess) {
std::cout << "Error in cuda. Waiting...";
exit(0);
}
}
int get_rand_in_range() {
return rand()%256;
}
__global__ void convert(float *input_image, float *output_image, int no_of_threads) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
for(int i = index; i <= (row*col); i = i + (blockDim.x*no_of_threads)) {
float r = input_image[3*i];
float g = input_image[3*i + 1];
float b = input_image[3*i + 2] ;
output_image[index] = (0.21*r + 0.71*g + 0.07*b);
}
}
void initialise_matrix(float A[], int m, int n) {
for(int i = 0; i < m; i++) {
for(int j = 0; j < n; j++) {
for(int k = 0; k < 3; k++ ) {
A[(i*n + j)*3 + k] = get_rand_in_range();
}
}
}
}
int main() {
srand(time(NULL));
float image[row * col * 3], gray_image[row * col];
float *device_image, *output_image;
handle_error(hipMalloc((void **)&device_image, row * col * 3 * sizeof(float)));
handle_error(hipMalloc((void **)&output_image, row * col * sizeof(float)));
initialise_matrix(image, row, col);
hipMemcpy(device_image, image, row * col * 3 * sizeof(float), hipMemcpyHostToDevice);
dim3 grid_dim(256,1,1);
dim3 block_dim(256,1,1);
convert<<<grid_dim, block_dim>>>(device_image, output_image, 256);
hipMemcpy(gray_image, output_image, row * col * sizeof(float), hipMemcpyDeviceToHost);
hipFree(device_image);
hipFree(output_image);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <iostream>
#include <hip/hip_runtime.h>
#include <time.h>
#include <math.h>
#define row 100
#define col 100
void handle_error(hipError_t error) {
if (error != hipSuccess) {
std::cout << "Error in cuda. Waiting...";
exit(0);
}
}
int get_rand_in_range() {
return rand()%256;
}
__global__ void convert(float *input_image, float *output_image, int no_of_threads) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
for(int i = index; i <= (row*col); i = i + (blockDim.x*no_of_threads)) {
float r = input_image[3*i];
float g = input_image[3*i + 1];
float b = input_image[3*i + 2] ;
output_image[index] = (0.21*r + 0.71*g + 0.07*b);
}
}
void initialise_matrix(float A[], int m, int n) {
for(int i = 0; i < m; i++) {
for(int j = 0; j < n; j++) {
for(int k = 0; k < 3; k++ ) {
A[(i*n + j)*3 + k] = get_rand_in_range();
}
}
}
}
int main() {
srand(time(NULL));
float image[row * col * 3], gray_image[row * col];
float *device_image, *output_image;
handle_error(hipMalloc((void **)&device_image, row * col * 3 * sizeof(float)));
handle_error(hipMalloc((void **)&output_image, row * col * sizeof(float)));
initialise_matrix(image, row, col);
hipMemcpy(device_image, image, row * col * 3 * sizeof(float), hipMemcpyHostToDevice);
dim3 grid_dim(256,1,1);
dim3 block_dim(256,1,1);
convert<<<grid_dim, block_dim>>>(device_image, output_image, 256);
hipMemcpy(gray_image, output_image, row * col * sizeof(float), hipMemcpyDeviceToHost);
hipFree(device_image);
hipFree(output_image);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z7convertPfS_i
.globl _Z7convertPfS_i
.p2align 8
.type _Z7convertPfS_i,@function
_Z7convertPfS_i:
s_load_b32 s2, s[0:1], 0x24
s_waitcnt lgkmcnt(0)
s_and_b32 s9, s2, 0xffff
s_mov_b32 s2, exec_lo
v_mad_u64_u32 v[1:2], null, s15, s9, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e32 0x2711, v1
s_cbranch_execz .LBB0_3
s_clause 0x1
s_load_b32 s10, s[0:1], 0x10
s_load_b128 s[0:3], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
v_lshl_add_u32 v3, v1, 1, v1
s_mov_b32 s8, 0
s_mov_b32 s5, 0x3fe6b851
s_mov_b32 s4, 0xeb851eb8
v_lshlrev_b64 v[5:6], 2, v[1:2]
s_mov_b32 s7, 0x3fcae147
s_mov_b32 s6, 0xae147ae1
s_waitcnt lgkmcnt(0)
s_mul_i32 s9, s9, s10
s_delay_alu instid0(VALU_DEP_1)
v_add_co_u32 v5, vcc_lo, s2, v5
v_add_co_ci_u32_e32 v6, vcc_lo, s3, v6, vcc_lo
s_mul_i32 s10, s9, 3
s_mov_b32 s3, 0x3fb1eb85
s_mov_b32 s2, 0x1eb851ec
.p2align 6
.LBB0_2:
v_ashrrev_i32_e32 v4, 31, v3
v_add_nc_u32_e32 v1, s9, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[7:8], 2, v[3:4]
v_add_nc_u32_e32 v3, s10, v3
v_add_co_u32 v7, vcc_lo, s0, v7
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v8, vcc_lo, s1, v8, vcc_lo
v_cmp_lt_i32_e32 vcc_lo, 0x2710, v1
global_load_b96 v[7:9], v[7:8], off
s_or_b32 s8, vcc_lo, s8
s_waitcnt vmcnt(0)
v_cvt_f64_f32_e32 v[10:11], v8
v_cvt_f64_f32_e32 v[7:8], v7
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[10:11], v[10:11], s[4:5]
v_fma_f64 v[7:8], v[7:8], s[6:7], v[10:11]
v_cvt_f64_f32_e32 v[9:10], v9
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[7:8], v[9:10], s[2:3], v[7:8]
v_cvt_f32_f64_e32 v0, v[7:8]
global_store_b32 v[5:6], v0, off
s_and_not1_b32 exec_lo, exec_lo, s8
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z7convertPfS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 12
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z7convertPfS_i, .Lfunc_end0-_Z7convertPfS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z7convertPfS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z7convertPfS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 12
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <iostream>
#include <hip/hip_runtime.h>
#include <time.h>
#include <math.h>
#define row 100
#define col 100
void handle_error(hipError_t error) {
if (error != hipSuccess) {
std::cout << "Error in cuda. Waiting...";
exit(0);
}
}
int get_rand_in_range() {
return rand()%256;
}
__global__ void convert(float *input_image, float *output_image, int no_of_threads) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
for(int i = index; i <= (row*col); i = i + (blockDim.x*no_of_threads)) {
float r = input_image[3*i];
float g = input_image[3*i + 1];
float b = input_image[3*i + 2] ;
output_image[index] = (0.21*r + 0.71*g + 0.07*b);
}
}
void initialise_matrix(float A[], int m, int n) {
for(int i = 0; i < m; i++) {
for(int j = 0; j < n; j++) {
for(int k = 0; k < 3; k++ ) {
A[(i*n + j)*3 + k] = get_rand_in_range();
}
}
}
}
int main() {
srand(time(NULL));
float image[row * col * 3], gray_image[row * col];
float *device_image, *output_image;
handle_error(hipMalloc((void **)&device_image, row * col * 3 * sizeof(float)));
handle_error(hipMalloc((void **)&output_image, row * col * sizeof(float)));
initialise_matrix(image, row, col);
hipMemcpy(device_image, image, row * col * 3 * sizeof(float), hipMemcpyHostToDevice);
dim3 grid_dim(256,1,1);
dim3 block_dim(256,1,1);
convert<<<grid_dim, block_dim>>>(device_image, output_image, 256);
hipMemcpy(gray_image, output_image, row * col * sizeof(float), hipMemcpyDeviceToHost);
hipFree(device_image);
hipFree(output_image);
return 0;
} | .text
.file "prog5.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z12handle_error10hipError_t # -- Begin function _Z12handle_error10hipError_t
.p2align 4, 0x90
.type _Z12handle_error10hipError_t,@function
_Z12handle_error10hipError_t: # @_Z12handle_error10hipError_t
.cfi_startproc
# %bb.0:
testl %edi, %edi
jne .LBB0_2
# %bb.1:
retq
.LBB0_2:
pushq %rax
.cfi_def_cfa_offset 16
movl $_ZSt4cout, %edi
movl $.L.str, %esi
callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
xorl %edi, %edi
callq exit
.Lfunc_end0:
.size _Z12handle_error10hipError_t, .Lfunc_end0-_Z12handle_error10hipError_t
.cfi_endproc
# -- End function
.globl _Z17get_rand_in_rangev # -- Begin function _Z17get_rand_in_rangev
.p2align 4, 0x90
.type _Z17get_rand_in_rangev,@function
_Z17get_rand_in_rangev: # @_Z17get_rand_in_rangev
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
callq rand
# kill: def $eax killed $eax def $rax
leal 255(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
andl $-256, %ecx
subl %ecx, %eax
# kill: def $eax killed $eax killed $rax
popq %rcx
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z17get_rand_in_rangev, .Lfunc_end1-_Z17get_rand_in_rangev
.cfi_endproc
# -- End function
.globl _Z22__device_stub__convertPfS_i # -- Begin function _Z22__device_stub__convertPfS_i
.p2align 4, 0x90
.type _Z22__device_stub__convertPfS_i,@function
_Z22__device_stub__convertPfS_i: # @_Z22__device_stub__convertPfS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z7convertPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end2:
.size _Z22__device_stub__convertPfS_i, .Lfunc_end2-_Z22__device_stub__convertPfS_i
.cfi_endproc
# -- End function
.globl _Z17initialise_matrixPfii # -- Begin function _Z17initialise_matrixPfii
.p2align 4, 0x90
.type _Z17initialise_matrixPfii,@function
_Z17initialise_matrixPfii: # @_Z17initialise_matrixPfii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $40, %rsp
.cfi_def_cfa_offset 96
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
# kill: def $edx killed $edx def $rdx
movq %rdx, 16(%rsp) # 8-byte Spill
testl %esi, %esi
jle .LBB3_8
# %bb.1: # %.preheader15.lr.ph
movq %rdi, %r14
movl %esi, %eax
movq %rax, 24(%rsp) # 8-byte Spill
movq 16(%rsp), %rax # 8-byte Reload
movl %eax, %r12d
leal (%rax,%rax,2), %eax
movl %eax, 12(%rsp) # 4-byte Spill
xorl %eax, %eax
movq %rax, (%rsp) # 8-byte Spill
xorl %ecx, %ecx
jmp .LBB3_2
.p2align 4, 0x90
.LBB3_7: # %._crit_edge
# in Loop: Header=BB3_2 Depth=1
movq 32(%rsp), %rcx # 8-byte Reload
incq %rcx
movq (%rsp), %rax # 8-byte Reload
addl 12(%rsp), %eax # 4-byte Folded Reload
movq %rax, (%rsp) # 8-byte Spill
cmpq 24(%rsp), %rcx # 8-byte Folded Reload
je .LBB3_8
.LBB3_2: # %.preheader15
# =>This Loop Header: Depth=1
# Child Loop BB3_4 Depth 2
# Child Loop BB3_5 Depth 3
movq %rcx, 32(%rsp) # 8-byte Spill
cmpl $0, 16(%rsp) # 4-byte Folded Reload
jle .LBB3_7
# %bb.3: # %.preheader.lr.ph
# in Loop: Header=BB3_2 Depth=1
movq (%rsp), %rax # 8-byte Reload
movl %eax, %ebx
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB3_4: # %.preheader
# Parent Loop BB3_2 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB3_5 Depth 3
movl %ebx, %eax
leaq (%r14,%rax,4), %r13
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_5: # Parent Loop BB3_2 Depth=1
# Parent Loop BB3_4 Depth=2
# => This Inner Loop Header: Depth=3
callq rand
# kill: def $eax killed $eax def $rax
leal 255(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
andl $-256, %ecx
subl %ecx, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%r13,%r15,4)
incq %r15
cmpq $3, %r15
jne .LBB3_5
# %bb.6: # in Loop: Header=BB3_4 Depth=2
incq %rbp
addl $3, %ebx
cmpq %r12, %rbp
jne .LBB3_4
jmp .LBB3_7
.LBB3_8: # %._crit_edge19
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size _Z17initialise_matrixPfii, .Lfunc_end3-_Z17initialise_matrixPfii
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $160096, %rsp # imm = 0x27160
.cfi_def_cfa_offset 160144
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
leaq 16(%rsp), %rdi
movl $120000, %esi # imm = 0x1D4C0
callq hipMalloc
testl %eax, %eax
jne .LBB4_11
# %bb.1: # %_Z12handle_error10hipError_t.exit
leaq 8(%rsp), %rdi
movl $40000, %esi # imm = 0x9C40
callq hipMalloc
testl %eax, %eax
jne .LBB4_11
# %bb.2: # %.preheader15.i.preheader
leaq 40096(%rsp), %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB4_3: # %.preheader15.i
# =>This Loop Header: Depth=1
# Child Loop BB4_4 Depth 2
# Child Loop BB4_5 Depth 3
movq %rbx, %r15
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB4_4: # %.preheader.i
# Parent Loop BB4_3 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB4_5 Depth 3
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB4_5: # Parent Loop BB4_3 Depth=1
# Parent Loop BB4_4 Depth=2
# => This Inner Loop Header: Depth=3
callq rand
# kill: def $eax killed $eax def $rax
leal 255(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
andl $-256, %ecx
subl %ecx, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%r15,%r13,4)
incq %r13
cmpq $3, %r13
jne .LBB4_5
# %bb.6: # in Loop: Header=BB4_4 Depth=2
incq %r12
addq $12, %r15
cmpq $100, %r12
jne .LBB4_4
# %bb.7: # %._crit_edge.i
# in Loop: Header=BB4_3 Depth=1
incq %r14
addq $1200, %rbx # imm = 0x4B0
cmpq $100, %r14
jne .LBB4_3
# %bb.8: # %_Z17initialise_matrixPfii.exit
movq 16(%rsp), %rdi
leaq 40096(%rsp), %rsi
movl $120000, %edx # imm = 0x1D4C0
movl $1, %ecx
callq hipMemcpy
movabsq $4294967552, %rdi # imm = 0x100000100
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_10
# %bb.9:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movl $256, 28(%rsp) # imm = 0x100
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 28(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z7convertPfS_i, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_10:
movq 8(%rsp), %rsi
leaq 96(%rsp), %rdi
movl $40000, %edx # imm = 0x9C40
movl $2, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $160096, %rsp # imm = 0x27160
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB4_11:
.cfi_def_cfa_offset 160144
movl $_ZSt4cout, %edi
movl $.L.str, %esi
callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
xorl %edi, %edi
callq exit
.Lfunc_end4:
.size main, .Lfunc_end4-main
.cfi_endproc
# -- End function
	.p2align	4, 0x90                         # -- Begin function __hip_module_ctor
	.type	__hip_module_ctor,@function
__hip_module_ctor:                      # @__hip_module_ctor
                                        # Runs from .init_array at program start
                                        # (see the .init_array entry below).
                                        # Registers the embedded GPU fat binary
                                        # exactly once (guarded by the cached
                                        # __hip_gpubin_handle) and registers the
                                        # _Z7convertPfS_i kernel with the HIP
                                        # runtime, then schedules teardown via
                                        # atexit(__hip_module_dtor).
	.cfi_startproc
# %bb.0:
	subq	$40, %rsp
	.cfi_def_cfa_offset 48
	cmpq	$0, __hip_gpubin_handle(%rip)   # already registered? skip re-registration
	jne	.LBB5_2
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip) # cache handle for reuse and for the dtor
.LBB5_2:
	movq	__hip_gpubin_handle(%rip), %rdi
	xorps	%xmm0, %xmm0                    # zero the four trailing pointer args
	movups	%xmm0, 16(%rsp)                 # ...which are passed on the stack
	movups	%xmm0, (%rsp)
	movl	$_Z7convertPfS_i, %esi          # host-side stub address used as the key
	movl	$.L__unnamed_1, %edx            # device function name "_Z7convertPfS_i"
	movl	$.L__unnamed_1, %ecx
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	movl	$__hip_module_dtor, %edi
	addq	$40, %rsp
	.cfi_def_cfa_offset 8
	jmp	atexit                          # TAILCALL
.Lfunc_end5:
	.size	__hip_module_ctor, .Lfunc_end5-__hip_module_ctor
	.cfi_endproc
                                        # -- End function
# -- End function
	.p2align	4, 0x90                         # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
__hip_module_dtor:                      # @__hip_module_dtor
                                        # atexit handler installed by
                                        # __hip_module_ctor: unregisters the fat
                                        # binary if one was registered, then
                                        # clears the cached handle so a repeat
                                        # invocation is a no-op.
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi                      # nothing registered -> plain return
	je	.LBB6_2
# %bb.1:
	pushq	%rax                            # re-align stack to 16 bytes for the call
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB6_2:
	retq
.Lfunc_end6:
	.size	__hip_module_dtor, .Lfunc_end6-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Error in cuda. Waiting..."
.size .L.str, 26
.type _Z7convertPfS_i,@object # @_Z7convertPfS_i
.section .rodata,"a",@progbits
.globl _Z7convertPfS_i
.p2align 3, 0x0
_Z7convertPfS_i:
.quad _Z22__device_stub__convertPfS_i
.size _Z7convertPfS_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z7convertPfS_i"
.size .L__unnamed_1, 16
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__convertPfS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _ZSt4cout
.addrsig_sym _Z7convertPfS_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z7convertPfS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GT.AND P0, PT, R0, 0x2710, PT ; /* 0x000027100000780c */
/* 0x000fda0003f04270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R15, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0f7435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R0, R15, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fc800078e020f */
/*0090*/ IMAD R4, R0, 0x3, RZ ; /* 0x0000000300047824 */
/* 0x000fc800078e02ff */
/*00a0*/ IMAD.WIDE R4, R4, R15, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fca00078e020f */
/*00b0*/ LDG.E R13, [R4.64+0x4] ; /* 0x00000404040d7981 */
/* 0x000ea8000c1e1900 */
/*00c0*/ LDG.E R12, [R4.64] ; /* 0x00000004040c7981 */
/* 0x000ee8000c1e1900 */
/*00d0*/ LDG.E R10, [R4.64+0x8] ; /* 0x00000804040a7981 */
/* 0x000f22000c1e1900 */
/*00e0*/ F2F.F64.F32 R8, R13 ; /* 0x0000000d00087310 */
/* 0x005e300000201800 */
/*00f0*/ F2F.F64.F32 R6, R12 ; /* 0x0000000c00067310 */
/* 0x008e700000201800 */
/*0100*/ F2F.F64.F32 R10, R10 ; /* 0x0000000a000a7310 */
/* 0x010ea20000201800 */
/*0110*/ DMUL R8, R8, c[0x2][0x0] ; /* 0x0080000008087a28 */
/* 0x001e4c0000000000 */
/*0120*/ DFMA R6, R6, c[0x2][0x8], R8 ; /* 0x0080020006067a2b */
/* 0x0020a40000000008 */
/*0130*/ MOV R9, c[0x0][0x0] ; /* 0x0000000000097a02 */
/* 0x001fca0000000f00 */
/*0140*/ IMAD R0, R9, c[0x0][0x170], R0 ; /* 0x00005c0009007a24 */
/* 0x000fe200078e0200 */
/*0150*/ DFMA R6, R10, c[0x2][0x10], R6 ; /* 0x008004000a067a2b */
/* 0x004e080000000006 */
/*0160*/ ISETP.GE.AND P0, PT, R0, 0x2711, PT ; /* 0x000027110000780c */
/* 0x000fcc0003f06270 */
/*0170*/ F2F.F32.F64 R7, R6 ; /* 0x0000000600077310 */
/* 0x001e240000301000 */
/*0180*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x0011ea000c101904 */
/*0190*/ @!P0 BRA 0x90 ; /* 0xfffffef000008947 */
/* 0x000fea000383ffff */
/*01a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01b0*/ BRA 0x1b0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z7convertPfS_i
.globl _Z7convertPfS_i
.p2align 8
.type _Z7convertPfS_i,@function
_Z7convertPfS_i:
s_load_b32 s2, s[0:1], 0x24
s_waitcnt lgkmcnt(0)
s_and_b32 s9, s2, 0xffff
s_mov_b32 s2, exec_lo
v_mad_u64_u32 v[1:2], null, s15, s9, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e32 0x2711, v1
s_cbranch_execz .LBB0_3
s_clause 0x1
s_load_b32 s10, s[0:1], 0x10
s_load_b128 s[0:3], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
v_lshl_add_u32 v3, v1, 1, v1
s_mov_b32 s8, 0
s_mov_b32 s5, 0x3fe6b851
s_mov_b32 s4, 0xeb851eb8
v_lshlrev_b64 v[5:6], 2, v[1:2]
s_mov_b32 s7, 0x3fcae147
s_mov_b32 s6, 0xae147ae1
s_waitcnt lgkmcnt(0)
s_mul_i32 s9, s9, s10
s_delay_alu instid0(VALU_DEP_1)
v_add_co_u32 v5, vcc_lo, s2, v5
v_add_co_ci_u32_e32 v6, vcc_lo, s3, v6, vcc_lo
s_mul_i32 s10, s9, 3
s_mov_b32 s3, 0x3fb1eb85
s_mov_b32 s2, 0x1eb851ec
.p2align 6
.LBB0_2:
v_ashrrev_i32_e32 v4, 31, v3
v_add_nc_u32_e32 v1, s9, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[7:8], 2, v[3:4]
v_add_nc_u32_e32 v3, s10, v3
v_add_co_u32 v7, vcc_lo, s0, v7
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v8, vcc_lo, s1, v8, vcc_lo
v_cmp_lt_i32_e32 vcc_lo, 0x2710, v1
global_load_b96 v[7:9], v[7:8], off
s_or_b32 s8, vcc_lo, s8
s_waitcnt vmcnt(0)
v_cvt_f64_f32_e32 v[10:11], v8
v_cvt_f64_f32_e32 v[7:8], v7
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[10:11], v[10:11], s[4:5]
v_fma_f64 v[7:8], v[7:8], s[6:7], v[10:11]
v_cvt_f64_f32_e32 v[9:10], v9
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[7:8], v[9:10], s[2:3], v[7:8]
v_cvt_f32_f64_e32 v0, v[7:8]
global_store_b32 v[5:6], v0, off
s_and_not1_b32 exec_lo, exec_lo, s8
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z7convertPfS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 12
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z7convertPfS_i, .Lfunc_end0-_Z7convertPfS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z7convertPfS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z7convertPfS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 12
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00063fea_00000000-6_prog5.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
# atexit teardown helper: unregisters the CUDA fat binary whose handle was
# cached in _ZL20__cudaFatCubinHandle by _ZL24__sti____cudaRegisterAllv.
.LFB3675:
	.cfi_startproc
	endbr64
	subq	$8, %rsp                # keep stack 16-byte aligned for the PLT call
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE3675:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Error in cuda. Waiting..."
.text
	.globl	_Z12handle_error9cudaError
	.type	_Z12handle_error9cudaError, @function
_Z12handle_error9cudaError:
# handle_error(cudaError err): returns immediately when err == cudaSuccess
# (edi == 0); otherwise writes "Error in cuda. Waiting..." to std::cout and
# terminates via exit(0).
.LFB3669:
	.cfi_startproc
	endbr64
	testl	%edi, %edi
	jne	.L8
	ret
.L8:
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	movl	$25, %edx               # length of the .LC0 message
	leaq	.LC0(%rip), %rsi
	leaq	_ZSt4cout(%rip), %rdi
	call	_ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
	movl	$0, %edi                # NOTE(review): exits with status 0 even on a
	call	exit@PLT                # CUDA error; a nonzero status would be clearer
	.cfi_endproc
.LFE3669:
	.size	_Z12handle_error9cudaError, .-_Z12handle_error9cudaError
	.globl	_Z17get_rand_in_rangev
	.type	_Z17get_rand_in_rangev, @function
_Z17get_rand_in_rangev:
# get_rand_in_range(): returns rand() reduced modulo 256.  The cltd/shrl/
# addl/movzbl/subl sequence is the compiler's signed "% 256" idiom; since
# rand() is non-negative this is simply the low byte, i.e. a value in [0, 255].
.LFB3670:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	call	rand@PLT
	cltd                            # sign-extend eax into edx
	shrl	$24, %edx               # bias = 255 for negative inputs, else 0
	addl	%edx, %eax
	movzbl	%al, %eax               # keep the low 8 bits
	subl	%edx, %eax              # remove bias -> C-semantics eax % 256
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE3670:
	.size	_Z17get_rand_in_rangev, .-_Z17get_rand_in_rangev
.globl _Z17initialise_matrixPfii
.type _Z17initialise_matrixPfii, @function
_Z17initialise_matrixPfii:
.LFB3671:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 24(%rsp)
movl %esi, 16(%rsp)
movl %edx, 12(%rsp)
testl %esi, %esi
jle .L11
leal (%rdx,%rdx,2), %eax
movl %eax, 20(%rsp)
movl %eax, %r13d
movl $0, %r14d
movl $0, %r15d
jmp .L13
.L19:
addl $3, %r12d
addq $12, %rbp
cmpl %r13d, %r12d
je .L15
.L16:
movl $0, %ebx
.L14:
call _Z17get_rand_in_rangev
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, 0(%rbp,%rbx,4)
addq $1, %rbx
cmpq $3, %rbx
jne .L14
jmp .L19
.L15:
addl $1, %r15d
movl 20(%rsp), %eax
addl %eax, %r13d
movl 12(%rsp), %eax
addl %eax, %r14d
cmpl %r15d, 16(%rsp)
je .L11
.L13:
cmpl $0, 12(%rsp)
jle .L15
leal (%r14,%r14,2), %r12d
movslq %r14d, %rax
leaq (%rax,%rax,2), %rax
movq 24(%rsp), %rcx
leaq (%rcx,%rax,4), %rbp
jmp .L16
.L11:
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3671:
.size _Z17initialise_matrixPfii, .-_Z17initialise_matrixPfii
.globl _Z29__device_stub__Z7convertPfS_iPfS_i
.type _Z29__device_stub__Z7convertPfS_iPfS_i, @function
_Z29__device_stub__Z7convertPfS_iPfS_i:
.LFB3697:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L24
.L20:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L25
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L24:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z7convertPfS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L20
.L25:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3697:
.size _Z29__device_stub__Z7convertPfS_iPfS_i, .-_Z29__device_stub__Z7convertPfS_iPfS_i
.globl _Z7convertPfS_i
.type _Z7convertPfS_i, @function
_Z7convertPfS_i:
.LFB3698:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z7convertPfS_iPfS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3698:
.size _Z7convertPfS_i, .-_Z7convertPfS_i
.globl main
.type main, @function
main:
.LFB3672:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq -159744(%rsp), %r11
.cfi_def_cfa 11, 159760
.LPSRL0:
subq $4096, %rsp
orq $0, (%rsp)
cmpq %r11, %rsp
jne .LPSRL0
.cfi_def_cfa_register 7
subq $320, %rsp
.cfi_def_cfa_offset 160080
movq %fs:40, %rax
movq %rax, 160056(%rsp)
xorl %eax, %eax
movl $0, %edi
call time@PLT
movl %eax, %edi
call srand@PLT
leaq 8(%rsp), %rdi
movl $120000, %esi
call cudaMalloc@PLT
movl %eax, %edi
call _Z12handle_error9cudaError
leaq 16(%rsp), %rdi
movl $40000, %esi
call cudaMalloc@PLT
movl %eax, %edi
call _Z12handle_error9cudaError
leaq 40048(%rsp), %rbx
movl $100, %edx
movl $100, %esi
movq %rbx, %rdi
call _Z17initialise_matrixPfii
movl $1, %ecx
movl $120000, %edx
movq %rbx, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $256, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $256, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L32
.L29:
leaq 48(%rsp), %rdi
movl $2, %ecx
movl $40000, %edx
movq 16(%rsp), %rsi
call cudaMemcpy@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 160056(%rsp), %rax
subq %fs:40, %rax
jne .L33
movl $0, %eax
addq $160064, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L32:
.cfi_restore_state
movl $256, %edx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z29__device_stub__Z7convertPfS_iPfS_i
jmp .L29
.L33:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3672:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z7convertPfS_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3700:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z7convertPfS_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3700:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "prog5.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z12handle_error10hipError_t # -- Begin function _Z12handle_error10hipError_t
.p2align 4, 0x90
.type _Z12handle_error10hipError_t,@function
_Z12handle_error10hipError_t: # @_Z12handle_error10hipError_t
.cfi_startproc
# %bb.0:
testl %edi, %edi
jne .LBB0_2
# %bb.1:
retq
.LBB0_2:
pushq %rax
.cfi_def_cfa_offset 16
movl $_ZSt4cout, %edi
movl $.L.str, %esi
callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
xorl %edi, %edi
callq exit
.Lfunc_end0:
.size _Z12handle_error10hipError_t, .Lfunc_end0-_Z12handle_error10hipError_t
.cfi_endproc
# -- End function
.globl _Z17get_rand_in_rangev # -- Begin function _Z17get_rand_in_rangev
.p2align 4, 0x90
.type _Z17get_rand_in_rangev,@function
_Z17get_rand_in_rangev: # @_Z17get_rand_in_rangev
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
callq rand
# kill: def $eax killed $eax def $rax
leal 255(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
andl $-256, %ecx
subl %ecx, %eax
# kill: def $eax killed $eax killed $rax
popq %rcx
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z17get_rand_in_rangev, .Lfunc_end1-_Z17get_rand_in_rangev
.cfi_endproc
# -- End function
.globl _Z22__device_stub__convertPfS_i # -- Begin function _Z22__device_stub__convertPfS_i
.p2align 4, 0x90
.type _Z22__device_stub__convertPfS_i,@function
_Z22__device_stub__convertPfS_i: # @_Z22__device_stub__convertPfS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z7convertPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end2:
.size _Z22__device_stub__convertPfS_i, .Lfunc_end2-_Z22__device_stub__convertPfS_i
.cfi_endproc
# -- End function
.globl _Z17initialise_matrixPfii # -- Begin function _Z17initialise_matrixPfii
.p2align 4, 0x90
.type _Z17initialise_matrixPfii,@function
_Z17initialise_matrixPfii: # @_Z17initialise_matrixPfii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $40, %rsp
.cfi_def_cfa_offset 96
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
# kill: def $edx killed $edx def $rdx
movq %rdx, 16(%rsp) # 8-byte Spill
testl %esi, %esi
jle .LBB3_8
# %bb.1: # %.preheader15.lr.ph
movq %rdi, %r14
movl %esi, %eax
movq %rax, 24(%rsp) # 8-byte Spill
movq 16(%rsp), %rax # 8-byte Reload
movl %eax, %r12d
leal (%rax,%rax,2), %eax
movl %eax, 12(%rsp) # 4-byte Spill
xorl %eax, %eax
movq %rax, (%rsp) # 8-byte Spill
xorl %ecx, %ecx
jmp .LBB3_2
.p2align 4, 0x90
.LBB3_7: # %._crit_edge
# in Loop: Header=BB3_2 Depth=1
movq 32(%rsp), %rcx # 8-byte Reload
incq %rcx
movq (%rsp), %rax # 8-byte Reload
addl 12(%rsp), %eax # 4-byte Folded Reload
movq %rax, (%rsp) # 8-byte Spill
cmpq 24(%rsp), %rcx # 8-byte Folded Reload
je .LBB3_8
.LBB3_2: # %.preheader15
# =>This Loop Header: Depth=1
# Child Loop BB3_4 Depth 2
# Child Loop BB3_5 Depth 3
movq %rcx, 32(%rsp) # 8-byte Spill
cmpl $0, 16(%rsp) # 4-byte Folded Reload
jle .LBB3_7
# %bb.3: # %.preheader.lr.ph
# in Loop: Header=BB3_2 Depth=1
movq (%rsp), %rax # 8-byte Reload
movl %eax, %ebx
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB3_4: # %.preheader
# Parent Loop BB3_2 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB3_5 Depth 3
movl %ebx, %eax
leaq (%r14,%rax,4), %r13
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_5: # Parent Loop BB3_2 Depth=1
# Parent Loop BB3_4 Depth=2
# => This Inner Loop Header: Depth=3
callq rand
# kill: def $eax killed $eax def $rax
leal 255(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
andl $-256, %ecx
subl %ecx, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%r13,%r15,4)
incq %r15
cmpq $3, %r15
jne .LBB3_5
# %bb.6: # in Loop: Header=BB3_4 Depth=2
incq %rbp
addl $3, %ebx
cmpq %r12, %rbp
jne .LBB3_4
jmp .LBB3_7
.LBB3_8: # %._crit_edge19
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size _Z17initialise_matrixPfii, .Lfunc_end3-_Z17initialise_matrixPfii
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $160096, %rsp # imm = 0x27160
.cfi_def_cfa_offset 160144
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
leaq 16(%rsp), %rdi
movl $120000, %esi # imm = 0x1D4C0
callq hipMalloc
testl %eax, %eax
jne .LBB4_11
# %bb.1: # %_Z12handle_error10hipError_t.exit
leaq 8(%rsp), %rdi
movl $40000, %esi # imm = 0x9C40
callq hipMalloc
testl %eax, %eax
jne .LBB4_11
# %bb.2: # %.preheader15.i.preheader
leaq 40096(%rsp), %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB4_3: # %.preheader15.i
# =>This Loop Header: Depth=1
# Child Loop BB4_4 Depth 2
# Child Loop BB4_5 Depth 3
movq %rbx, %r15
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB4_4: # %.preheader.i
# Parent Loop BB4_3 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB4_5 Depth 3
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB4_5: # Parent Loop BB4_3 Depth=1
# Parent Loop BB4_4 Depth=2
# => This Inner Loop Header: Depth=3
callq rand
# kill: def $eax killed $eax def $rax
leal 255(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
andl $-256, %ecx
subl %ecx, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%r15,%r13,4)
incq %r13
cmpq $3, %r13
jne .LBB4_5
# %bb.6: # in Loop: Header=BB4_4 Depth=2
incq %r12
addq $12, %r15
cmpq $100, %r12
jne .LBB4_4
# %bb.7: # %._crit_edge.i
# in Loop: Header=BB4_3 Depth=1
incq %r14
addq $1200, %rbx # imm = 0x4B0
cmpq $100, %r14
jne .LBB4_3
# %bb.8: # %_Z17initialise_matrixPfii.exit
movq 16(%rsp), %rdi
leaq 40096(%rsp), %rsi
movl $120000, %edx # imm = 0x1D4C0
movl $1, %ecx
callq hipMemcpy
movabsq $4294967552, %rdi # imm = 0x100000100
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_10
# %bb.9:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movl $256, 28(%rsp) # imm = 0x100
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 28(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z7convertPfS_i, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_10:
movq 8(%rsp), %rsi
leaq 96(%rsp), %rdi
movl $40000, %edx # imm = 0x9C40
movl $2, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $160096, %rsp # imm = 0x27160
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB4_11:
.cfi_def_cfa_offset 160144
movl $_ZSt4cout, %edi
movl $.L.str, %esi
callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
xorl %edi, %edi
callq exit
.Lfunc_end4:
.size main, .Lfunc_end4-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB5_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB5_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7convertPfS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end5:
.size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB6_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB6_2:
retq
.Lfunc_end6:
.size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Error in cuda. Waiting..."
.size .L.str, 26
.type _Z7convertPfS_i,@object # @_Z7convertPfS_i
.section .rodata,"a",@progbits
.globl _Z7convertPfS_i
.p2align 3, 0x0
_Z7convertPfS_i:
.quad _Z22__device_stub__convertPfS_i
.size _Z7convertPfS_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z7convertPfS_i"
.size .L__unnamed_1, 16
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__convertPfS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _ZSt4cout
.addrsig_sym _Z7convertPfS_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void externSet(int* variablesMem,int* lastValuesMem, int nQueen,int nVariableCollection){
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < nVariableCollection*nQueen*nQueen){
variablesMem[index] = 1;
if(index < nVariableCollection*nQueen)
lastValuesMem[index] = 0;
}
} | code for sm_80
Function : _Z9externSetPiS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR6, c[0x0][0x170] ; /* 0x00005c0000067ab9 */
/* 0x000fe40000000a00 */
/*0030*/ UIMAD UR4, UR6, UR7, URZ ; /* 0x00000007060472a4 */
/* 0x000fe2000f8e023f */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e260000002100 */
/*0050*/ UIMAD UR5, UR4, UR6, URZ ; /* 0x00000006040572a4 */
/* 0x000fe2000f8e023f */
/*0060*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0070*/ ISETP.GE.AND P0, PT, R0, UR5, PT ; /* 0x0000000500007c0c */
/* 0x000fda000bf06270 */
/*0080*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0090*/ ISETP.GE.AND P0, PT, R0.reuse, UR4, PT ; /* 0x0000000400007c0c */
/* 0x040fe2000bf06270 */
/*00a0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*00b0*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*00c0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x1 ; /* 0x00000001ff057424 */
/* 0x000fe400078e00ff */
/*00d0*/ IMAD.WIDE R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fca00078e0203 */
/*00e0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x0001e6000c101906 */
/*00f0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0100*/ SHF.R.S32.HI R3, RZ, 0x1f, R0 ; /* 0x0000001fff037819 */
/* 0x001fe40000011400 */
/*0110*/ LEA R2, P0, R0, c[0x0][0x168], 0x2 ; /* 0x00005a0000027a11 */
/* 0x000fc800078010ff */
/*0120*/ LEA.HI.X R3, R0, c[0x0][0x16c], R3, 0x2, P0 ; /* 0x00005b0000037a11 */
/* 0x000fca00000f1403 */
/*0130*/ STG.E [R2.64], RZ ; /* 0x000000ff02007986 */
/* 0x000fe2000c101906 */
/*0140*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0150*/ BRA 0x150; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void externSet(int* variablesMem,int* lastValuesMem, int nQueen,int nVariableCollection){
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < nVariableCollection*nQueen*nQueen){
variablesMem[index] = 1;
if(index < nVariableCollection*nQueen)
lastValuesMem[index] = 0;
}
} | .file "tmpxft_000f6be2_00000000-6_externSet.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z32__device_stub__Z9externSetPiS_iiPiS_ii
.type _Z32__device_stub__Z9externSetPiS_iiPiS_ii, @function
_Z32__device_stub__Z9externSetPiS_iiPiS_ii:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9externSetPiS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z32__device_stub__Z9externSetPiS_iiPiS_ii, .-_Z32__device_stub__Z9externSetPiS_iiPiS_ii
.globl _Z9externSetPiS_ii
.type _Z9externSetPiS_ii, @function
_Z9externSetPiS_ii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z9externSetPiS_iiPiS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z9externSetPiS_ii, .-_Z9externSetPiS_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z9externSetPiS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z9externSetPiS_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void externSet(int* variablesMem,int* lastValuesMem, int nQueen,int nVariableCollection){
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < nVariableCollection*nQueen*nQueen){
variablesMem[index] = 1;
if(index < nVariableCollection*nQueen)
lastValuesMem[index] = 0;
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void externSet(int* variablesMem,int* lastValuesMem, int nQueen,int nVariableCollection){
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < nVariableCollection*nQueen*nQueen){
variablesMem[index] = 1;
if(index < nVariableCollection*nQueen)
lastValuesMem[index] = 0;
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void externSet(int* variablesMem,int* lastValuesMem, int nQueen,int nVariableCollection){
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < nVariableCollection*nQueen*nQueen){
variablesMem[index] = 1;
if(index < nVariableCollection*nQueen)
lastValuesMem[index] = 0;
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9externSetPiS_ii
.globl _Z9externSetPiS_ii
.p2align 8
.type _Z9externSetPiS_ii,@function
_Z9externSetPiS_ii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b64 s[4:5], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mul_i32 s2, s5, s4
s_mul_i32 s3, s2, s4
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_cmp_gt_i32_e32 vcc_lo, s3, v1
s_and_saveexec_b32 s3, vcc_lo
s_cbranch_execz .LBB0_3
s_load_b64 s[4:5], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
v_mov_b32_e32 v0, 1
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v4, vcc_lo, s4, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v3, vcc_lo
v_cmp_gt_i32_e32 vcc_lo, s2, v1
global_store_b32 v[4:5], v0, off
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_3
s_load_b64 s[0:1], s[0:1], 0x8
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v2
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v3, vcc_lo
v_mov_b32_e32 v2, 0
global_store_b32 v[0:1], v2, off
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9externSetPiS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9externSetPiS_ii, .Lfunc_end0-_Z9externSetPiS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9externSetPiS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9externSetPiS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void externSet(int* variablesMem,int* lastValuesMem, int nQueen,int nVariableCollection){
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < nVariableCollection*nQueen*nQueen){
variablesMem[index] = 1;
if(index < nVariableCollection*nQueen)
lastValuesMem[index] = 0;
}
} | .text
.file "externSet.hip"
.globl _Z24__device_stub__externSetPiS_ii # -- Begin function _Z24__device_stub__externSetPiS_ii
.p2align 4, 0x90
.type _Z24__device_stub__externSetPiS_ii,@function
_Z24__device_stub__externSetPiS_ii: # @_Z24__device_stub__externSetPiS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9externSetPiS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z24__device_stub__externSetPiS_ii, .Lfunc_end0-_Z24__device_stub__externSetPiS_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9externSetPiS_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9externSetPiS_ii,@object # @_Z9externSetPiS_ii
.section .rodata,"a",@progbits
.globl _Z9externSetPiS_ii
.p2align 3, 0x0
_Z9externSetPiS_ii:
.quad _Z24__device_stub__externSetPiS_ii
.size _Z9externSetPiS_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9externSetPiS_ii"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__externSetPiS_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9externSetPiS_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z9externSetPiS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR6, c[0x0][0x170] ; /* 0x00005c0000067ab9 */
/* 0x000fe40000000a00 */
/*0030*/ UIMAD UR4, UR6, UR7, URZ ; /* 0x00000007060472a4 */
/* 0x000fe2000f8e023f */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e260000002100 */
/*0050*/ UIMAD UR5, UR4, UR6, URZ ; /* 0x00000006040572a4 */
/* 0x000fe2000f8e023f */
/*0060*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0070*/ ISETP.GE.AND P0, PT, R0, UR5, PT ; /* 0x0000000500007c0c */
/* 0x000fda000bf06270 */
/*0080*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0090*/ ISETP.GE.AND P0, PT, R0.reuse, UR4, PT ; /* 0x0000000400007c0c */
/* 0x040fe2000bf06270 */
/*00a0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*00b0*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*00c0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x1 ; /* 0x00000001ff057424 */
/* 0x000fe400078e00ff */
/*00d0*/ IMAD.WIDE R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fca00078e0203 */
/*00e0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x0001e6000c101906 */
/*00f0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0100*/ SHF.R.S32.HI R3, RZ, 0x1f, R0 ; /* 0x0000001fff037819 */
/* 0x001fe40000011400 */
/*0110*/ LEA R2, P0, R0, c[0x0][0x168], 0x2 ; /* 0x00005a0000027a11 */
/* 0x000fc800078010ff */
/*0120*/ LEA.HI.X R3, R0, c[0x0][0x16c], R3, 0x2, P0 ; /* 0x00005b0000037a11 */
/* 0x000fca00000f1403 */
/*0130*/ STG.E [R2.64], RZ ; /* 0x000000ff02007986 */
/* 0x000fe2000c101906 */
/*0140*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0150*/ BRA 0x150; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9externSetPiS_ii
.globl _Z9externSetPiS_ii
.p2align 8
.type _Z9externSetPiS_ii,@function
_Z9externSetPiS_ii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b64 s[4:5], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mul_i32 s2, s5, s4
s_mul_i32 s3, s2, s4
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_cmp_gt_i32_e32 vcc_lo, s3, v1
s_and_saveexec_b32 s3, vcc_lo
s_cbranch_execz .LBB0_3
s_load_b64 s[4:5], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
v_mov_b32_e32 v0, 1
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v4, vcc_lo, s4, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v3, vcc_lo
v_cmp_gt_i32_e32 vcc_lo, s2, v1
global_store_b32 v[4:5], v0, off
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_3
s_load_b64 s[0:1], s[0:1], 0x8
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v2
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v3, vcc_lo
v_mov_b32_e32 v2, 0
global_store_b32 v[0:1], v2, off
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9externSetPiS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9externSetPiS_ii, .Lfunc_end0-_Z9externSetPiS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9externSetPiS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9externSetPiS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000f6be2_00000000-6_externSet.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z32__device_stub__Z9externSetPiS_iiPiS_ii
.type _Z32__device_stub__Z9externSetPiS_iiPiS_ii, @function
_Z32__device_stub__Z9externSetPiS_iiPiS_ii:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9externSetPiS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z32__device_stub__Z9externSetPiS_iiPiS_ii, .-_Z32__device_stub__Z9externSetPiS_iiPiS_ii
.globl _Z9externSetPiS_ii
.type _Z9externSetPiS_ii, @function
_Z9externSetPiS_ii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z9externSetPiS_iiPiS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z9externSetPiS_ii, .-_Z9externSetPiS_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z9externSetPiS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z9externSetPiS_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "externSet.hip"
.globl _Z24__device_stub__externSetPiS_ii # -- Begin function _Z24__device_stub__externSetPiS_ii
.p2align 4, 0x90
.type _Z24__device_stub__externSetPiS_ii,@function
_Z24__device_stub__externSetPiS_ii: # @_Z24__device_stub__externSetPiS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9externSetPiS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z24__device_stub__externSetPiS_ii, .Lfunc_end0-_Z24__device_stub__externSetPiS_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9externSetPiS_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9externSetPiS_ii,@object # @_Z9externSetPiS_ii
.section .rodata,"a",@progbits
.globl _Z9externSetPiS_ii
.p2align 3, 0x0
_Z9externSetPiS_ii:
.quad _Z24__device_stub__externSetPiS_ii
.size _Z9externSetPiS_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9externSetPiS_ii"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__externSetPiS_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9externSetPiS_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
int main(){
cudaError_t custatus;
int gpu_num = 0;
cudaDeviceProp prop;
custatus = cudaGetDeviceCount(&gpu_num);
printf("Number of GPUs : %d\n", gpu_num);
custatus = cudaSetDevice(0);
if(custatus != cudaSuccess){
printf("Failed to set Device 0. Exit\n");
return -1;
}
cudaGetDeviceProperties(&prop, 0);
printf("Device name : %s\n", prop.name);
printf("Compute Capability : %d.%d\n", prop.major, prop.minor);
if(prop.major<3 || (prop.major==3 && prop.minor<5)){
printf("[error] Your device compute capability is too low. Exit.\n");
return -1;
}
printf("totalGlobalMem : %.1f GB\n", prop.totalGlobalMem/1024/1024/1024.0);
if(prop.totalGlobalMem/1024/1024/1024.0 < 2){
printf("[error] Your device global memory is too small. Exit.\n");
return -1;
}
printf("canMapHostMem : %d\n", prop.canMapHostMemory);
if(!prop.canMapHostMemory){
printf("[error] Your device do not support host memory mapping. Exit.\n");
return -1;
}
printf("sharedMemPerBlock : %u KB\n", (unsigned int)prop.sharedMemPerBlock/1024);
printf("maxThreadsPerBlock : %d\n", prop.maxThreadsPerBlock);
printf("maxGridSize : %d, %d, %d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("regPerBlock : %d\n", prop.regsPerBlock);
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
int main(){
cudaError_t custatus;
int gpu_num = 0;
cudaDeviceProp prop;
custatus = cudaGetDeviceCount(&gpu_num);
printf("Number of GPUs : %d\n", gpu_num);
custatus = cudaSetDevice(0);
if(custatus != cudaSuccess){
printf("Failed to set Device 0. Exit\n");
return -1;
}
cudaGetDeviceProperties(&prop, 0);
printf("Device name : %s\n", prop.name);
printf("Compute Capability : %d.%d\n", prop.major, prop.minor);
if(prop.major<3 || (prop.major==3 && prop.minor<5)){
printf("[error] Your device compute capability is too low. Exit.\n");
return -1;
}
printf("totalGlobalMem : %.1f GB\n", prop.totalGlobalMem/1024/1024/1024.0);
if(prop.totalGlobalMem/1024/1024/1024.0 < 2){
printf("[error] Your device global memory is too small. Exit.\n");
return -1;
}
printf("canMapHostMem : %d\n", prop.canMapHostMemory);
if(!prop.canMapHostMemory){
printf("[error] Your device do not support host memory mapping. Exit.\n");
return -1;
}
printf("sharedMemPerBlock : %u KB\n", (unsigned int)prop.sharedMemPerBlock/1024);
printf("maxThreadsPerBlock : %d\n", prop.maxThreadsPerBlock);
printf("maxGridSize : %d, %d, %d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("regPerBlock : %d\n", prop.regsPerBlock);
} | .file "tmpxft_0015b334_00000000-6_tst_gpu.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Number of GPUs : %d\n"
.LC1:
.string "Failed to set Device 0. Exit\n"
.LC2:
.string "Device name : %s\n"
.LC3:
.string "Compute Capability : %d.%d\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC4:
.string "[error] Your device compute capability is too low. Exit.\n"
.align 8
.LC6:
.string "totalGlobalMem : %.1f GB\n"
.align 8
.LC8:
.string "[error] Your device global memory is too small. Exit.\n"
.section .rodata.str1.1
.LC9:
.string "canMapHostMem : %d\n"
.section .rodata.str1.8
.align 8
.LC10:
.string "[error] Your device do not support host memory mapping. Exit.\n"
.section .rodata.str1.1
.LC11:
.string "sharedMemPerBlock : %u KB\n"
.LC12:
.string "maxThreadsPerBlock : %d\n"
.section .rodata.str1.8
.align 8
.LC13:
.string "maxGridSize : %d, %d, %d\n"
.section .rodata.str1.1
.LC14:
.string "regPerBlock : %d\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $1056, %rsp
.cfi_def_cfa_offset 1072
movq %fs:40, %rax
movq %rax, 1048(%rsp)
xorl %eax, %eax
movl $0, 12(%rsp)
leaq 12(%rsp), %rdi
call cudaGetDeviceCount@PLT
movl 12(%rsp), %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %edi
call cudaSetDevice@PLT
testl %eax, %eax
jne .L19
leaq 16(%rsp), %rbx
movl $0, %esi
movq %rbx, %rdi
call cudaGetDeviceProperties_v2@PLT
movq %rbx, %rdx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 380(%rsp), %ecx
movl 376(%rsp), %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 376(%rsp), %eax
cmpl $2, %eax
jle .L6
cmpl $3, %eax
jne .L7
cmpl $4, 380(%rsp)
jle .L6
.L7:
movq 304(%rsp), %rax
shrq $20, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
mulsd .LC5(%rip), %xmm0
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 304(%rsp), %rax
shrq $20, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
mulsd .LC5(%rip), %xmm0
movsd .LC7(%rip), %xmm1
comisd %xmm0, %xmm1
ja .L20
movl 416(%rsp), %edx
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, 416(%rsp)
je .L21
movl 312(%rsp), %edx
shrl $10, %edx
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 336(%rsp), %edx
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 360(%rsp), %r8d
movl 356(%rsp), %ecx
movl 352(%rsp), %edx
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 320(%rsp), %edx
leaq .LC14(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
.L3:
movq 1048(%rsp), %rdx
subq %fs:40, %rdx
jne .L22
addq $1056, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $-1, %eax
jmp .L3
.L6:
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $-1, %eax
jmp .L3
.L20:
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $-1, %eax
jmp .L3
.L21:
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $-1, %eax
jmp .L3
.L22:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC5:
.long 0
.long 1062207488
.align 8
.LC7:
.long 0
.long 1073741824
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
int main(){
cudaError_t custatus;
int gpu_num = 0;
cudaDeviceProp prop;
custatus = cudaGetDeviceCount(&gpu_num);
printf("Number of GPUs : %d\n", gpu_num);
custatus = cudaSetDevice(0);
if(custatus != cudaSuccess){
printf("Failed to set Device 0. Exit\n");
return -1;
}
cudaGetDeviceProperties(&prop, 0);
printf("Device name : %s\n", prop.name);
printf("Compute Capability : %d.%d\n", prop.major, prop.minor);
if(prop.major<3 || (prop.major==3 && prop.minor<5)){
printf("[error] Your device compute capability is too low. Exit.\n");
return -1;
}
printf("totalGlobalMem : %.1f GB\n", prop.totalGlobalMem/1024/1024/1024.0);
if(prop.totalGlobalMem/1024/1024/1024.0 < 2){
printf("[error] Your device global memory is too small. Exit.\n");
return -1;
}
printf("canMapHostMem : %d\n", prop.canMapHostMemory);
if(!prop.canMapHostMemory){
printf("[error] Your device do not support host memory mapping. Exit.\n");
return -1;
}
printf("sharedMemPerBlock : %u KB\n", (unsigned int)prop.sharedMemPerBlock/1024);
printf("maxThreadsPerBlock : %d\n", prop.maxThreadsPerBlock);
printf("maxGridSize : %d, %d, %d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("regPerBlock : %d\n", prop.regsPerBlock);
} | #include <stdio.h>
#include "hip/hip_runtime.h"
int main(){
hipError_t custatus;
int gpu_num = 0;
hipDeviceProp_t prop;
custatus = hipGetDeviceCount(&gpu_num);
printf("Number of GPUs : %d\n", gpu_num);
custatus = hipSetDevice(0);
if(custatus != hipSuccess){
printf("Failed to set Device 0. Exit\n");
return -1;
}
hipGetDeviceProperties(&prop, 0);
printf("Device name : %s\n", prop.name);
printf("Compute Capability : %d.%d\n", prop.major, prop.minor);
if(prop.major<3 || (prop.major==3 && prop.minor<5)){
printf("[error] Your device compute capability is too low. Exit.\n");
return -1;
}
printf("totalGlobalMem : %.1f GB\n", prop.totalGlobalMem/1024/1024/1024.0);
if(prop.totalGlobalMem/1024/1024/1024.0 < 2){
printf("[error] Your device global memory is too small. Exit.\n");
return -1;
}
printf("canMapHostMem : %d\n", prop.canMapHostMemory);
if(!prop.canMapHostMemory){
printf("[error] Your device do not support host memory mapping. Exit.\n");
return -1;
}
printf("sharedMemPerBlock : %u KB\n", (unsigned int)prop.sharedMemPerBlock/1024);
printf("maxThreadsPerBlock : %d\n", prop.maxThreadsPerBlock);
printf("maxGridSize : %d, %d, %d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("regPerBlock : %d\n", prop.regsPerBlock);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <stdio.h>
#include "hip/hip_runtime.h"
int main(){
hipError_t custatus;
int gpu_num = 0;
hipDeviceProp_t prop;
custatus = hipGetDeviceCount(&gpu_num);
printf("Number of GPUs : %d\n", gpu_num);
custatus = hipSetDevice(0);
if(custatus != hipSuccess){
printf("Failed to set Device 0. Exit\n");
return -1;
}
hipGetDeviceProperties(&prop, 0);
printf("Device name : %s\n", prop.name);
printf("Compute Capability : %d.%d\n", prop.major, prop.minor);
if(prop.major<3 || (prop.major==3 && prop.minor<5)){
printf("[error] Your device compute capability is too low. Exit.\n");
return -1;
}
printf("totalGlobalMem : %.1f GB\n", prop.totalGlobalMem/1024/1024/1024.0);
if(prop.totalGlobalMem/1024/1024/1024.0 < 2){
printf("[error] Your device global memory is too small. Exit.\n");
return -1;
}
printf("canMapHostMem : %d\n", prop.canMapHostMemory);
if(!prop.canMapHostMemory){
printf("[error] Your device do not support host memory mapping. Exit.\n");
return -1;
}
printf("sharedMemPerBlock : %u KB\n", (unsigned int)prop.sharedMemPerBlock/1024);
printf("maxThreadsPerBlock : %d\n", prop.maxThreadsPerBlock);
printf("maxGridSize : %d, %d, %d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("regPerBlock : %d\n", prop.regsPerBlock);
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <stdio.h>
#include "hip/hip_runtime.h"
int main(){
hipError_t custatus;
int gpu_num = 0;
hipDeviceProp_t prop;
custatus = hipGetDeviceCount(&gpu_num);
printf("Number of GPUs : %d\n", gpu_num);
custatus = hipSetDevice(0);
if(custatus != hipSuccess){
printf("Failed to set Device 0. Exit\n");
return -1;
}
hipGetDeviceProperties(&prop, 0);
printf("Device name : %s\n", prop.name);
printf("Compute Capability : %d.%d\n", prop.major, prop.minor);
if(prop.major<3 || (prop.major==3 && prop.minor<5)){
printf("[error] Your device compute capability is too low. Exit.\n");
return -1;
}
printf("totalGlobalMem : %.1f GB\n", prop.totalGlobalMem/1024/1024/1024.0);
if(prop.totalGlobalMem/1024/1024/1024.0 < 2){
printf("[error] Your device global memory is too small. Exit.\n");
return -1;
}
printf("canMapHostMem : %d\n", prop.canMapHostMemory);
if(!prop.canMapHostMemory){
printf("[error] Your device do not support host memory mapping. Exit.\n");
return -1;
}
printf("sharedMemPerBlock : %u KB\n", (unsigned int)prop.sharedMemPerBlock/1024);
printf("maxThreadsPerBlock : %d\n", prop.maxThreadsPerBlock);
printf("maxGridSize : %d, %d, %d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("regPerBlock : %d\n", prop.regsPerBlock);
} | .text
.file "tst_gpu.hip"
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI0_0:
.quad 0x3f50000000000000 # double 9.765625E-4
.LCPI0_1:
.quad 0x4000000000000000 # double 2
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $1488, %rsp # imm = 0x5D0
.cfi_def_cfa_offset 1504
.cfi_offset %rbx, -16
movl $0, 12(%rsp)
leaq 12(%rsp), %rdi
callq hipGetDeviceCount
movl 12(%rsp), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
xorl %edi, %edi
callq hipSetDevice
testl %eax, %eax
je .LBB0_3
# %bb.1:
movl $.Lstr.3, %edi
jmp .LBB0_2
.LBB0_3:
leaq 16(%rsp), %rbx
movq %rbx, %rdi
xorl %esi, %esi
callq hipGetDevicePropertiesR0600
movl $.L.str.2, %edi
movq %rbx, %rsi
xorl %eax, %eax
callq printf
movl 376(%rsp), %esi
movl 380(%rsp), %edx
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
cmpl $3, 376(%rsp)
jl .LBB0_6
# %bb.4:
jne .LBB0_7
# %bb.5:
cmpl $4, 380(%rsp)
jg .LBB0_7
.LBB0_6:
movl $.Lstr.2, %edi
.LBB0_2:
callq puts@PLT
movl $-1, %eax
.LBB0_12:
addq $1488, %rsp # imm = 0x5D0
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.LBB0_7:
.cfi_def_cfa_offset 1504
movq 304(%rsp), %rax
shrq $20, %rax
cvtsi2sd %rax, %xmm0
mulsd .LCPI0_0(%rip), %xmm0
movl $.L.str.5, %edi
movb $1, %al
callq printf
movq 304(%rsp), %rax
shrq $20, %rax
xorps %xmm0, %xmm0
cvtsi2sd %rax, %xmm0
mulsd .LCPI0_0(%rip), %xmm0
movsd .LCPI0_1(%rip), %xmm1 # xmm1 = mem[0],zero
ucomisd %xmm0, %xmm1
jbe .LBB0_9
# %bb.8:
movl $.Lstr.1, %edi
jmp .LBB0_2
.LBB0_9:
movl 416(%rsp), %esi
movl $.L.str.7, %edi
xorl %eax, %eax
callq printf
cmpl $0, 416(%rsp)
je .LBB0_10
# %bb.11:
movl 312(%rsp), %esi
shrl $10, %esi
movl $.L.str.9, %edi
xorl %eax, %eax
callq printf
movl 336(%rsp), %esi
movl $.L.str.10, %edi
xorl %eax, %eax
callq printf
movl 352(%rsp), %esi
movl 356(%rsp), %edx
movl 360(%rsp), %ecx
movl $.L.str.11, %edi
xorl %eax, %eax
callq printf
movl 320(%rsp), %esi
movl $.L.str.12, %edi
xorl %eax, %eax
callq printf
xorl %eax, %eax
jmp .LBB0_12
.LBB0_10:
movl $.Lstr, %edi
jmp .LBB0_2
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Number of GPUs : %d\n"
.size .L.str, 27
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Device name : %s\n"
.size .L.str.2, 27
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Compute Capability : %d.%d\n"
.size .L.str.3, 30
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "totalGlobalMem : %.1f GB\n"
.size .L.str.5, 32
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "canMapHostMem : %d\n"
.size .L.str.7, 27
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "sharedMemPerBlock : %u KB\n"
.size .L.str.9, 30
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "maxThreadsPerBlock : %d\n"
.size .L.str.10, 27
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "maxGridSize : %d, %d, %d\n"
.size .L.str.11, 35
.type .L.str.12,@object # @.str.12
.L.str.12:
.asciz "regPerBlock : %d\n"
.size .L.str.12, 27
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "[error] Your device do not support host memory mapping. Exit."
.size .Lstr, 62
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "[error] Your device global memory is too small. Exit."
.size .Lstr.1, 54
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "[error] Your device compute capability is too low. Exit."
.size .Lstr.2, 57
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "Failed to set Device 0. Exit"
.size .Lstr.3, 29
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0015b334_00000000-6_tst_gpu.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Number of GPUs : %d\n"
.LC1:
.string "Failed to set Device 0. Exit\n"
.LC2:
.string "Device name : %s\n"
.LC3:
.string "Compute Capability : %d.%d\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC4:
.string "[error] Your device compute capability is too low. Exit.\n"
.align 8
.LC6:
.string "totalGlobalMem : %.1f GB\n"
.align 8
.LC8:
.string "[error] Your device global memory is too small. Exit.\n"
.section .rodata.str1.1
.LC9:
.string "canMapHostMem : %d\n"
.section .rodata.str1.8
.align 8
.LC10:
.string "[error] Your device do not support host memory mapping. Exit.\n"
.section .rodata.str1.1
.LC11:
.string "sharedMemPerBlock : %u KB\n"
.LC12:
.string "maxThreadsPerBlock : %d\n"
.section .rodata.str1.8
.align 8
.LC13:
.string "maxGridSize : %d, %d, %d\n"
.section .rodata.str1.1
.LC14:
.string "regPerBlock : %d\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $1056, %rsp
.cfi_def_cfa_offset 1072
movq %fs:40, %rax
movq %rax, 1048(%rsp)
xorl %eax, %eax
movl $0, 12(%rsp)
leaq 12(%rsp), %rdi
call cudaGetDeviceCount@PLT
movl 12(%rsp), %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %edi
call cudaSetDevice@PLT
testl %eax, %eax
jne .L19
leaq 16(%rsp), %rbx
movl $0, %esi
movq %rbx, %rdi
call cudaGetDeviceProperties_v2@PLT
movq %rbx, %rdx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 380(%rsp), %ecx
movl 376(%rsp), %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 376(%rsp), %eax
cmpl $2, %eax
jle .L6
cmpl $3, %eax
jne .L7
cmpl $4, 380(%rsp)
jle .L6
.L7:
movq 304(%rsp), %rax
shrq $20, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
mulsd .LC5(%rip), %xmm0
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 304(%rsp), %rax
shrq $20, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
mulsd .LC5(%rip), %xmm0
movsd .LC7(%rip), %xmm1
comisd %xmm0, %xmm1
ja .L20
movl 416(%rsp), %edx
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, 416(%rsp)
je .L21
movl 312(%rsp), %edx
shrl $10, %edx
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 336(%rsp), %edx
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 360(%rsp), %r8d
movl 356(%rsp), %ecx
movl 352(%rsp), %edx
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 320(%rsp), %edx
leaq .LC14(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
.L3:
movq 1048(%rsp), %rdx
subq %fs:40, %rdx
jne .L22
addq $1056, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $-1, %eax
jmp .L3
.L6:
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $-1, %eax
jmp .L3
.L20:
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $-1, %eax
jmp .L3
.L21:
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $-1, %eax
jmp .L3
.L22:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC5:
.long 0
.long 1062207488
.align 8
.LC7:
.long 0
.long 1073741824
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "tst_gpu.hip"
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI0_0:
.quad 0x3f50000000000000 # double 9.765625E-4
.LCPI0_1:
.quad 0x4000000000000000 # double 2
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $1488, %rsp # imm = 0x5D0
.cfi_def_cfa_offset 1504
.cfi_offset %rbx, -16
movl $0, 12(%rsp)
leaq 12(%rsp), %rdi
callq hipGetDeviceCount
movl 12(%rsp), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
xorl %edi, %edi
callq hipSetDevice
testl %eax, %eax
je .LBB0_3
# %bb.1:
movl $.Lstr.3, %edi
jmp .LBB0_2
.LBB0_3:
leaq 16(%rsp), %rbx
movq %rbx, %rdi
xorl %esi, %esi
callq hipGetDevicePropertiesR0600
movl $.L.str.2, %edi
movq %rbx, %rsi
xorl %eax, %eax
callq printf
movl 376(%rsp), %esi
movl 380(%rsp), %edx
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
cmpl $3, 376(%rsp)
jl .LBB0_6
# %bb.4:
jne .LBB0_7
# %bb.5:
cmpl $4, 380(%rsp)
jg .LBB0_7
.LBB0_6:
movl $.Lstr.2, %edi
.LBB0_2:
callq puts@PLT
movl $-1, %eax
.LBB0_12:
addq $1488, %rsp # imm = 0x5D0
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.LBB0_7:
.cfi_def_cfa_offset 1504
movq 304(%rsp), %rax
shrq $20, %rax
cvtsi2sd %rax, %xmm0
mulsd .LCPI0_0(%rip), %xmm0
movl $.L.str.5, %edi
movb $1, %al
callq printf
movq 304(%rsp), %rax
shrq $20, %rax
xorps %xmm0, %xmm0
cvtsi2sd %rax, %xmm0
mulsd .LCPI0_0(%rip), %xmm0
movsd .LCPI0_1(%rip), %xmm1 # xmm1 = mem[0],zero
ucomisd %xmm0, %xmm1
jbe .LBB0_9
# %bb.8:
movl $.Lstr.1, %edi
jmp .LBB0_2
.LBB0_9:
movl 416(%rsp), %esi
movl $.L.str.7, %edi
xorl %eax, %eax
callq printf
cmpl $0, 416(%rsp)
je .LBB0_10
# %bb.11:
movl 312(%rsp), %esi
shrl $10, %esi
movl $.L.str.9, %edi
xorl %eax, %eax
callq printf
movl 336(%rsp), %esi
movl $.L.str.10, %edi
xorl %eax, %eax
callq printf
movl 352(%rsp), %esi
movl 356(%rsp), %edx
movl 360(%rsp), %ecx
movl $.L.str.11, %edi
xorl %eax, %eax
callq printf
movl 320(%rsp), %esi
movl $.L.str.12, %edi
xorl %eax, %eax
callq printf
xorl %eax, %eax
jmp .LBB0_12
.LBB0_10:
movl $.Lstr, %edi
jmp .LBB0_2
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Number of GPUs : %d\n"
.size .L.str, 27
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Device name : %s\n"
.size .L.str.2, 27
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Compute Capability : %d.%d\n"
.size .L.str.3, 30
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "totalGlobalMem : %.1f GB\n"
.size .L.str.5, 32
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "canMapHostMem : %d\n"
.size .L.str.7, 27
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "sharedMemPerBlock : %u KB\n"
.size .L.str.9, 30
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "maxThreadsPerBlock : %d\n"
.size .L.str.10, 27
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "maxGridSize : %d, %d, %d\n"
.size .L.str.11, 35
.type .L.str.12,@object # @.str.12
.L.str.12:
.asciz "regPerBlock : %d\n"
.size .L.str.12, 27
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "[error] Your device do not support host memory mapping. Exit."
.size .Lstr, 62
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "[error] Your device global memory is too small. Exit."
.size .Lstr.1, 54
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "[error] Your device compute capability is too low. Exit."
.size .Lstr.2, 57
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "Failed to set Device 0. Exit"
.size .Lstr.3, 29
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include<cstdio>
#include "vector_types.h"
extern "C" {
__global__ void cuAdd(int* list, int* elements, int i, int listSize){
int thid = blockIdx.x * blockDim.x + threadIdx.x;
while(thid < listSize){
int value = list[thid] + elements[i];
list[thid+listSize] = value;
thid += blockDim.x * gridDim.x;
}
}
__device__ int2 findMedian(int* tabA, int a, int b, int* tabB, int c, int d);
__global__ void cuPartition(int j, int* prevList, int4* H, int size){
int* newList = prevList+size;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadCounts = 1 << (j-1);
while (tid < threadCounts){
int medianId = tid + threadCounts;
int a = H[medianId].x;
int b = H[medianId].y;
int c = H[medianId].z;
int d = H[medianId].w;
int2 ef = findMedian(prevList, a, b, newList, c, d);
H[2*medianId].x = a;
H[2*medianId].y = ef.x;
H[2*medianId].z = c;
H[2*medianId].w = ef.y;
H[2*medianId + 1].x = ef.x;
H[2*medianId + 1].y = b;
H[2*medianId + 1].z = ef.y;
H[2*medianId + 1].w = d;
tid += blockDim.x * gridDim.x;
}
}
__device__ void mergeInc(int* listA, int beginA, int endA, int* listB, int beginB, int endB, int* result);
__global__ void cuMergeIncreasing(int* lists, int4* H, int listSize, int threads, int* result){
int* newList = lists + listSize;
int tid = blockIdx.x * blockDim.x + threadIdx.x + 1;
while(tid <= threads){
int medianId = tid + threads - 1;
int4 localFetch = H[medianId];
int a = localFetch.x;
int b = localFetch.y;
int c = localFetch.z;
int d = localFetch.w;
mergeInc(lists, a, b, newList, c, d, result);
tid += blockDim.x * gridDim.x;
}
}
__global__ void cuPrune(int* listA, int sizeA, int* listB, int sizeB, int* found, int2* pickedBlocks, int* pickedBlocksCounter, int M){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int k = blockDim.x*gridDim.x;
int chunkA = (sizeA + k - 1)/ (k);
int chunkB = (sizeB + k - 1)/ (k);
for(int j = 0; j < k ; j++){
if(*found) return;
int x = listA[tid * chunkA] + listB[(j+1) * chunkB - 1]; // mozemy wyskoczyc jesli chunkA lub ChunbB nie dzieli k
int y = listA[(tid+1) * chunkA - 1] + listB[j * chunkB]; // mozemy wyskoczyc tez
if (x == M || y == M) atomicExch(found, 1);
else if(x < M && y > M){
int pos = atomicAdd(pickedBlocksCounter, 1);
pickedBlocks[pos].x = tid;
pickedBlocks[pos].y = j;
}
}
}
__device__ bool searchSteep(int* listA, int chunkSizeA, int* listB, int chunkSizeB, int M){
int a, b;
a = b = 0;
while(a < chunkSizeA && b < chunkSizeB){
int value = listA[a] + listB[b];
if(value == M) return true;
if(value < M) a++;
else b++;
}
return false;
}
__global__ void cuSearch(int* listA, int sizeA, int* listB, int sizeB, int2* pickedBlocks, int* noPickedBlocks, int* found, int M){
int thid = blockIdx.x * blockDim.x + threadIdx.x;
int k = blockDim.x*gridDim.x;
int chunkA = (sizeA + k - 1)/ (k);
int chunkB = (sizeB + k - 1)/ (k);
while(thid < *noPickedBlocks){
if(*found) return;
int2 idsOfFragmentToCheck = pickedBlocks[thid];
int* shiftedListA = listA + idsOfFragmentToCheck.x * chunkA;
int* shiftedListB = listB + idsOfFragmentToCheck.y * chunkB;
int _sizeA = thid != k-1 ? chunkA : sizeA % chunkA;
int _sizeB = thid != k-1 ? chunkB : sizeB % chunkB;
bool f = searchSteep(shiftedListA, _sizeA, shiftedListB, _sizeB, M);
if(f) *found = true;
thid += k;
}
}
__global__ void cuReverse(int* tab, int size){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid >= size/2)
return;
int tmp = tab[tid];
tab[tid] = tab[size-tid-1];
tab[size-tid-1] = tmp;
}
__device__ int binsearchInc(int* tab, int l, int r, int value){
while(l < r){
int m = (l + r) / 2;
if(tab[m] >= value){
r = m;
} else{
l = m+1;
}
}
return l;
}
__device__ int2 findMedian(int* tabA, int a, int b, int* tabB, int c, int d){
int aMiddle, bMiddle, otherBegin, otherEnd, otherValue;
int* otherTab;
if(b-a > d-c){
aMiddle = (b + a) / 2;
otherTab = tabB;
otherBegin = c;
otherEnd = d;
otherValue = tabA[aMiddle];
//bMiddle = binsearchInc(tabB, c, d, tabA[aMiddle]);
} else{
bMiddle = (c + d) / 2;
otherTab = tabA;
otherBegin = a;
otherEnd = b;
otherValue = tabB[bMiddle];
//aMiddle = binsearchInc(tabA, a, b, tabB[bMiddle]);
}
int theOtherMiddle = binsearchInc(otherTab, otherBegin, otherEnd, otherValue);
if(b-a > d-c){
bMiddle = theOtherMiddle;
} else{
aMiddle = theOtherMiddle;
}
int2 result;
result.x = aMiddle;
result.y = bMiddle;
return result;
}
__device__ inline void mergeInc(int* listA, int beginA, int endA, int* listB, int beginB, int endB, int* result){
int position = beginA + beginB;
while(beginA < endA && beginB < endB){
if (listA[beginA] < listB[beginB]){
result[position++] = listA[beginA++];
} else{
result[position++] = listB[beginB++];
}
}
while(beginA < endA){
result[position++] = listA[beginA++];
}
while(beginB < endB){
result[position++] = listB[beginB++];
}
}
} | .file "tmpxft_0000c413_00000000-6_kernels.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2063:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl searchSteep
.type searchSteep, @function
searchSteep:
.LFB2057:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2057:
.size searchSteep, .-searchSteep
.globl binsearchInc
.type binsearchInc, @function
binsearchInc:
.LFB2058:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2058:
.size binsearchInc, .-binsearchInc
.globl findMedian
.type findMedian, @function
findMedian:
.LFB2059:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2059:
.size findMedian, .-findMedian
.globl _Z28__device_stub__Z5cuAddPiS_iiPiS_ii
.type _Z28__device_stub__Z5cuAddPiS_iiPiS_ii, @function
_Z28__device_stub__Z5cuAddPiS_iiPiS_ii:
.LFB2085:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L13
.L9:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L14
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L13:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq cuAdd(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L9
.L14:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z28__device_stub__Z5cuAddPiS_iiPiS_ii, .-_Z28__device_stub__Z5cuAddPiS_iiPiS_ii
.globl cuAdd
.type cuAdd, @function
cuAdd:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z28__device_stub__Z5cuAddPiS_iiPiS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size cuAdd, .-cuAdd
.globl _Z39__device_stub__Z11cuPartitioniPiP4int4iiPiP4int4i
.type _Z39__device_stub__Z11cuPartitioniPiP4int4iiPiP4int4i, @function
_Z39__device_stub__Z11cuPartitioniPiP4int4iiPiP4int4i:
.LFB2087:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 24(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 24(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L21
.L17:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L22
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L21:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq cuPartition(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L17
.L22:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2087:
.size _Z39__device_stub__Z11cuPartitioniPiP4int4iiPiP4int4i, .-_Z39__device_stub__Z11cuPartitioniPiP4int4iiPiP4int4i
.globl cuPartition
.type cuPartition, @function
cuPartition:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z39__device_stub__Z11cuPartitioniPiP4int4iiPiP4int4i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size cuPartition, .-cuPartition
.globl _Z47__device_stub__Z17cuMergeIncreasingPiP4int4iiS_PiP4int4iiS_
.type _Z47__device_stub__Z17cuMergeIncreasingPiP4int4iiS_PiP4int4iiS_, @function
_Z47__device_stub__Z17cuMergeIncreasingPiP4int4iiS_PiP4int4iiS_:
.LFB2089:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %r8, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L29
.L25:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L30
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L29:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq cuMergeIncreasing(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L25
.L30:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2089:
.size _Z47__device_stub__Z17cuMergeIncreasingPiP4int4iiS_PiP4int4iiS_, .-_Z47__device_stub__Z17cuMergeIncreasingPiP4int4iiS_PiP4int4iiS_
.globl cuMergeIncreasing
.type cuMergeIncreasing, @function
cuMergeIncreasing:
.LFB2090:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z47__device_stub__Z17cuMergeIncreasingPiP4int4iiS_PiP4int4iiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2090:
.size cuMergeIncreasing, .-cuMergeIncreasing
.globl _Z41__device_stub__Z7cuPrunePiiS_iS_P4int2S_iPiiS_iS_P4int2S_i
.type _Z41__device_stub__Z7cuPrunePiiS_iS_P4int2S_iPiiS_iS_P4int2S_i, @function
_Z41__device_stub__Z7cuPrunePiiS_iS_P4int2S_iPiiS_iS_P4int2S_i:
.LFB2091:
.cfi_startproc
endbr64
subq $200, %rsp
.cfi_def_cfa_offset 208
movq %rdi, 40(%rsp)
movl %esi, 36(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 32(%rsp)
movq %r8, 16(%rsp)
movq %r9, 8(%rsp)
movq 208(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 36(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 32(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
movq %rsp, %rax
movq %rax, 160(%rsp)
leaq 216(%rsp), %rax
movq %rax, 168(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L37
.L33:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L38
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L37:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq cuPrune(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L33
.L38:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2091:
.size _Z41__device_stub__Z7cuPrunePiiS_iS_P4int2S_iPiiS_iS_P4int2S_i, .-_Z41__device_stub__Z7cuPrunePiiS_iS_P4int2S_iPiiS_iS_P4int2S_i
.globl cuPrune
.type cuPrune, @function
cuPrune:
.LFB2092:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z41__device_stub__Z7cuPrunePiiS_iS_P4int2S_iPiiS_iS_P4int2S_i
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2092:
.size cuPrune, .-cuPrune
.globl _Z42__device_stub__Z8cuSearchPiiS_iP4int2S_S_iPiiS_iP4int2S_S_i
.type _Z42__device_stub__Z8cuSearchPiiS_iP4int2S_S_iPiiS_iP4int2S_S_i, @function
_Z42__device_stub__Z8cuSearchPiiS_iP4int2S_S_iPiiS_iP4int2S_S_i:
.LFB2093:
.cfi_startproc
endbr64
subq $200, %rsp
.cfi_def_cfa_offset 208
movq %rdi, 40(%rsp)
movl %esi, 36(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 32(%rsp)
movq %r8, 16(%rsp)
movq %r9, 8(%rsp)
movq 208(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 36(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 32(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
movq %rsp, %rax
movq %rax, 160(%rsp)
leaq 216(%rsp), %rax
movq %rax, 168(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L45
.L41:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L46
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L45:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq cuSearch(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L41
.L46:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2093:
.size _Z42__device_stub__Z8cuSearchPiiS_iP4int2S_S_iPiiS_iP4int2S_S_i, .-_Z42__device_stub__Z8cuSearchPiiS_iP4int2S_S_iPiiS_iP4int2S_S_i
.globl cuSearch
.type cuSearch, @function
cuSearch:
.LFB2094:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z42__device_stub__Z8cuSearchPiiS_iP4int2S_S_iPiiS_iP4int2S_S_i
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2094:
.size cuSearch, .-cuSearch
.globl _Z29__device_stub__Z9cuReversePiiPii
.type _Z29__device_stub__Z9cuReversePiiPii, @function
_Z29__device_stub__Z9cuReversePiiPii:
.LFB2095:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L53
.L49:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L54
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L53:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq cuReverse(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L49
.L54:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2095:
.size _Z29__device_stub__Z9cuReversePiiPii, .-_Z29__device_stub__Z9cuReversePiiPii
.globl cuReverse
.type cuReverse, @function
cuReverse:
.LFB2096:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z9cuReversePiiPii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2096:
.size cuReverse, .-cuReverse
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "cuReverse"
.LC1:
.string "cuSearch"
.LC2:
.string "cuPrune"
.LC3:
.string "cuMergeIncreasing"
.LC4:
.string "cuPartition"
.LC5:
.string "cuAdd"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2098:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq cuReverse(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq cuSearch(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq cuPrune(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq cuMergeIncreasing(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq cuPartition(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq cuAdd(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2098:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include<cstdio>
#include "vector_types.h"
extern "C" {
/**
 * Grid-stride kernel: for each of the first listSize entries of list, adds
 * elements[i] and writes the sum into the SECOND half of the buffer,
 * list[listSize .. 2*listSize). The first half is left unchanged, so list
 * must have room for at least 2*listSize ints.
 */
__global__ void cuAdd(int* list, int* elements, int i, int listSize){
int thid = blockIdx.x * blockDim.x + threadIdx.x;
while(thid < listSize){
int value = list[thid] + elements[i];
list[thid+listSize] = value;
thid += blockDim.x * gridDim.x;
}
}
__device__ int2 findMedian(int* tabA, int a, int b, int* tabB, int c, int d);
/**
 * Builds level j of a binary partition tree stored in H (heap-style indexing:
 * node m has children 2m and 2m+1). Each int4 node holds two half-open
 * ranges: (x,y) into prevList and (z,w) into the second buffer at
 * prevList+size. A grid-stride loop over the 2^(j-1) nodes of level j-1
 * splits each node at its merge median (findMedian) and writes the two child
 * nodes, so that each child describes a pair of subranges that merge into a
 * contiguous output block.
 */
__global__ void cuPartition(int j, int* prevList, int4* H, int size){
int* newList = prevList+size;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadCounts = 1 << (j-1); // number of nodes on the level being split
while (tid < threadCounts){
int medianId = tid + threadCounts; // heap index of this thread's node
int a = H[medianId].x;
int b = H[medianId].y;
int c = H[medianId].z;
int d = H[medianId].w;
int2 ef = findMedian(prevList, a, b, newList, c, d);
// Left child: [a, ef.x) x [c, ef.y)
H[2*medianId].x = a;
H[2*medianId].y = ef.x;
H[2*medianId].z = c;
H[2*medianId].w = ef.y;
// Right child: [ef.x, b) x [ef.y, d)
H[2*medianId + 1].x = ef.x;
H[2*medianId + 1].y = b;
H[2*medianId + 1].z = ef.y;
H[2*medianId + 1].w = d;
tid += blockDim.x * gridDim.x;
}
}
__device__ void mergeInc(int* listA, int beginA, int endA, int* listB, int beginB, int endB, int* result);
/**
 * Leaf stage of the partition-tree merge: for each of the `threads` leaf
 * nodes of H (heap indices threads-1+1 .. 2*threads-1 as iterated below),
 * sequentially merges the node's pair of subranges — one from lists[0..),
 * one from the second buffer at lists+listSize — into result. The leaves'
 * output offsets (beginA+beginB inside mergeInc) are disjoint by
 * construction, so no synchronization between leaves is needed.
 */
__global__ void cuMergeIncreasing(int* lists, int4* H, int listSize, int threads, int* result){
int* newList = lists + listSize;
int tid = blockIdx.x * blockDim.x + threadIdx.x + 1; // 1-based leaf counter
while(tid <= threads){
int medianId = tid + threads - 1; // heap index of this leaf
int4 localFetch = H[medianId]; // single 16-byte load of the node
int a = localFetch.x;
int b = localFetch.y;
int c = localFetch.z;
int d = localFetch.w;
mergeInc(lists, a, b, newList, c, d, result);
tid += blockDim.x * gridDim.x;
}
}
/**
 * Two-sum pruning stage: listA is sorted increasing, listB decreasing (it is
 * produced via cuReverse elsewhere in this file — TODO confirm with callers).
 * Both lists are cut into k chunks (k = total thread count). Thread tid
 * checks its chunk of listA against every chunk j of listB using the chunks'
 * extreme-corner sums x and y: if either corner hits M exactly, the global
 * *found flag is raised; if M lies strictly between the corners, the (tid,j)
 * block may contain a solution and is appended to pickedBlocks for the
 * follow-up cuSearch pass.
 */
__global__ void cuPrune(int* listA, int sizeA, int* listB, int sizeB, int* found, int2* pickedBlocks, int* pickedBlocksCounter, int M){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int k = blockDim.x*gridDim.x;
int chunkA = (sizeA + k - 1)/ (k);
int chunkB = (sizeB + k - 1)/ (k);
for(int j = 0; j < k ; j++){
if(*found) return;
int x = listA[tid * chunkA] + listB[(j+1) * chunkB - 1]; // we may read out of bounds if chunkA or chunkB does not divide evenly by k
int y = listA[(tid+1) * chunkA - 1] + listB[j * chunkB]; // may also read out of bounds
if (x == M || y == M) atomicExch(found, 1);
else if(x < M && y > M){
// Candidate block: reserve a slot atomically and record (tid, j).
int pos = atomicAdd(pickedBlocksCounter, 1);
pickedBlocks[pos].x = tid;
pickedBlocks[pos].y = j;
}
}
}
/**
 * Classic two-pointer ("staircase") search for a pair summing exactly to M.
 * Assumes listA ascends and listB descends over the scanned chunks (matching
 * the corner test in cuPrune — TODO confirm orientation with callers): when
 * the current sum is below M, advancing a raises it; otherwise advancing b
 * lowers it. O(chunkSizeA + chunkSizeB).
 * @return true iff some listA[a] + listB[b] == M within the chunks.
 */
__device__ bool searchSteep(int* listA, int chunkSizeA, int* listB, int chunkSizeB, int M){
int a, b;
a = b = 0;
while(a < chunkSizeA && b < chunkSizeB){
int value = listA[a] + listB[b];
if(value == M) return true;
if(value < M) a++;
else b++;
}
return false;
}
/**
 * Search stage: grid-stride loop over the candidate blocks emitted by
 * cuPrune. Each picked (x, y) pair names a chunk of listA and a chunk of
 * listB; the block is scanned with searchSteep and *found is raised on a hit.
 * Early-exits as soon as any thread has set *found.
 *
 * NOTE(review): the tail-size logic below looks suspect — `thid` indexes the
 * picked-block list, not the chunk grid, so comparing `thid != k-1` does not
 * identify the last (possibly short) chunk; also `sizeA % chunkA` is 0 (not
 * chunkA) when chunkA divides sizeA exactly. Behavior preserved here; verify
 * against the host-side launch before changing.
 */
__global__ void cuSearch(int* listA, int sizeA, int* listB, int sizeB, int2* pickedBlocks, int* noPickedBlocks, int* found, int M){
int thid = blockIdx.x * blockDim.x + threadIdx.x;
int k = blockDim.x*gridDim.x;
int chunkA = (sizeA + k - 1)/ (k);
int chunkB = (sizeB + k - 1)/ (k);
while(thid < *noPickedBlocks){
if(*found) return;
int2 idsOfFragmentToCheck = pickedBlocks[thid];
int* shiftedListA = listA + idsOfFragmentToCheck.x * chunkA;
int* shiftedListB = listB + idsOfFragmentToCheck.y * chunkB;
int _sizeA = thid != k-1 ? chunkA : sizeA % chunkA;
int _sizeB = thid != k-1 ? chunkB : sizeB % chunkB;
bool f = searchSteep(shiftedListA, _sizeA, shiftedListB, _sizeB, M);
if(f) *found = true;
thid += k;
}
}
/**
 * Reverses an integer array in place.
 * One thread handles one mirror-pair swap; threads with tid >= size/2 exit,
 * so the middle element of an odd-length array is (correctly) untouched.
 * No grid-stride loop: the launch must supply at least size/2 total threads.
 */
__global__ void cuReverse(int* tab, int size){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid >= size/2)
return;
// Swap element tid with its mirror element size-tid-1.
int tmp = tab[tid];
tab[tid] = tab[size-tid-1];
tab[size-tid-1] = tmp;
}
/**
 * Lower-bound binary search over an increasingly sorted range tab[l, r).
 * Returns the first index whose element is >= value, or r if all elements
 * are smaller (std::lower_bound semantics).
 */
__device__ int binsearchInc(int* tab, int l, int r, int value){
while(l < r){
int m = (l + r) / 2; // NOTE(review): l+r could overflow for very large ranges
if(tab[m] >= value){
r = m;
} else{
l = m+1;
}
}
return l;
}
/**
 * Given two increasingly sorted subranges tabA[a,b) and tabB[c,d), computes a
 * split point pair (aMiddle, bMiddle) suitable for partitioning a merge: the
 * midpoint of the LONGER range is taken directly, and its value is located in
 * the other range via a lower-bound search, so elements left of both split
 * points merge before elements right of them.
 * Returns the pair as an int2 (x = split in tabA, y = split in tabB).
 */
__device__ int2 findMedian(int* tabA, int a, int b, int* tabB, int c, int d){
int aMiddle, bMiddle, otherBegin, otherEnd, otherValue;
int* otherTab;
// Pick the longer of the two ranges as the "pivot" side; remember the
// other side's bounds and the pivot value for the binary search below.
if(b-a > d-c){
aMiddle = (b + a) / 2;
otherTab = tabB;
otherBegin = c;
otherEnd = d;
otherValue = tabA[aMiddle];
//bMiddle = binsearchInc(tabB, c, d, tabA[aMiddle]);
} else{
bMiddle = (c + d) / 2;
otherTab = tabA;
otherBegin = a;
otherEnd = b;
otherValue = tabB[bMiddle];
//aMiddle = binsearchInc(tabA, a, b, tabB[bMiddle]);
}
// Lower-bound position of the pivot value inside the other range.
int theOtherMiddle = binsearchInc(otherTab, otherBegin, otherEnd, otherValue);
// Assign the searched position to whichever side was NOT the pivot side
// (same condition as above, so the branches pair up).
if(b-a > d-c){
bMiddle = theOtherMiddle;
} else{
aMiddle = theOtherMiddle;
}
int2 result;
result.x = aMiddle;
result.y = bMiddle;
return result;
}
/**
 * Sequentially merges two increasingly sorted subranges listA[beginA,endA)
 * and listB[beginB,endB) into result, starting at offset beginA+beginB.
 * That offset convention lets many threads merge disjoint partitions of the
 * same pair of lists into one output array without overlapping writes
 * (partitions as produced by findMedian).
 */
__device__ inline void mergeInc(int* listA, int beginA, int endA, int* listB, int beginB, int endB, int* result){
int position = beginA + beginB;
// Standard two-way merge: take the smaller head element each step.
while(beginA < endA && beginB < endB){
if (listA[beginA] < listB[beginB]){
result[position++] = listA[beginA++];
} else{
result[position++] = listB[beginB++];
}
}
// Drain whichever input still has elements left.
while(beginA < endA){
result[position++] = listA[beginA++];
}
while(beginB < endB){
result[position++] = listB[beginB++];
}
}
} | #include <hip/hip_runtime.h>
#include<cstdio>
#include "hip/hip_vector_types.h"
extern "C" {
__global__ void cuAdd(int* list, int* elements, int i, int listSize){
int thid = blockIdx.x * blockDim.x + threadIdx.x;
while(thid < listSize){
int value = list[thid] + elements[i];
list[thid+listSize] = value;
thid += blockDim.x * gridDim.x;
}
}
__device__ int2 findMedian(int* tabA, int a, int b, int* tabB, int c, int d);
__global__ void cuPartition(int j, int* prevList, int4* H, int size){
int* newList = prevList+size;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadCounts = 1 << (j-1);
while (tid < threadCounts){
int medianId = tid + threadCounts;
int a = H[medianId].x;
int b = H[medianId].y;
int c = H[medianId].z;
int d = H[medianId].w;
int2 ef = findMedian(prevList, a, b, newList, c, d);
H[2*medianId].x = a;
H[2*medianId].y = ef.x;
H[2*medianId].z = c;
H[2*medianId].w = ef.y;
H[2*medianId + 1].x = ef.x;
H[2*medianId + 1].y = b;
H[2*medianId + 1].z = ef.y;
H[2*medianId + 1].w = d;
tid += blockDim.x * gridDim.x;
}
}
__device__ void mergeInc(int* listA, int beginA, int endA, int* listB, int beginB, int endB, int* result);
__global__ void cuMergeIncreasing(int* lists, int4* H, int listSize, int threads, int* result){
int* newList = lists + listSize;
int tid = blockIdx.x * blockDim.x + threadIdx.x + 1;
while(tid <= threads){
int medianId = tid + threads - 1;
int4 localFetch = H[medianId];
int a = localFetch.x;
int b = localFetch.y;
int c = localFetch.z;
int d = localFetch.w;
mergeInc(lists, a, b, newList, c, d, result);
tid += blockDim.x * gridDim.x;
}
}
__global__ void cuPrune(int* listA, int sizeA, int* listB, int sizeB, int* found, int2* pickedBlocks, int* pickedBlocksCounter, int M){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int k = blockDim.x*gridDim.x;
int chunkA = (sizeA + k - 1)/ (k);
int chunkB = (sizeB + k - 1)/ (k);
for(int j = 0; j < k ; j++){
if(*found) return;
int x = listA[tid * chunkA] + listB[(j+1) * chunkB - 1]; // mozemy wyskoczyc jesli chunkA lub ChunbB nie dzieli k
int y = listA[(tid+1) * chunkA - 1] + listB[j * chunkB]; // mozemy wyskoczyc tez
if (x == M || y == M) atomicExch(found, 1);
else if(x < M && y > M){
int pos = atomicAdd(pickedBlocksCounter, 1);
pickedBlocks[pos].x = tid;
pickedBlocks[pos].y = j;
}
}
}
__device__ bool searchSteep(int* listA, int chunkSizeA, int* listB, int chunkSizeB, int M){
int a, b;
a = b = 0;
while(a < chunkSizeA && b < chunkSizeB){
int value = listA[a] + listB[b];
if(value == M) return true;
if(value < M) a++;
else b++;
}
return false;
}
__global__ void cuSearch(int* listA, int sizeA, int* listB, int sizeB, int2* pickedBlocks, int* noPickedBlocks, int* found, int M){
int thid = blockIdx.x * blockDim.x + threadIdx.x;
int k = blockDim.x*gridDim.x;
int chunkA = (sizeA + k - 1)/ (k);
int chunkB = (sizeB + k - 1)/ (k);
while(thid < *noPickedBlocks){
if(*found) return;
int2 idsOfFragmentToCheck = pickedBlocks[thid];
int* shiftedListA = listA + idsOfFragmentToCheck.x * chunkA;
int* shiftedListB = listB + idsOfFragmentToCheck.y * chunkB;
int _sizeA = thid != k-1 ? chunkA : sizeA % chunkA;
int _sizeB = thid != k-1 ? chunkB : sizeB % chunkB;
bool f = searchSteep(shiftedListA, _sizeA, shiftedListB, _sizeB, M);
if(f) *found = true;
thid += k;
}
}
__global__ void cuReverse(int* tab, int size){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid >= size/2)
return;
int tmp = tab[tid];
tab[tid] = tab[size-tid-1];
tab[size-tid-1] = tmp;
}
__device__ int binsearchInc(int* tab, int l, int r, int value){
while(l < r){
int m = (l + r) / 2;
if(tab[m] >= value){
r = m;
} else{
l = m+1;
}
}
return l;
}
__device__ int2 findMedian(int* tabA, int a, int b, int* tabB, int c, int d){
int aMiddle, bMiddle, otherBegin, otherEnd, otherValue;
int* otherTab;
if(b-a > d-c){
aMiddle = (b + a) / 2;
otherTab = tabB;
otherBegin = c;
otherEnd = d;
otherValue = tabA[aMiddle];
//bMiddle = binsearchInc(tabB, c, d, tabA[aMiddle]);
} else{
bMiddle = (c + d) / 2;
otherTab = tabA;
otherBegin = a;
otherEnd = b;
otherValue = tabB[bMiddle];
//aMiddle = binsearchInc(tabA, a, b, tabB[bMiddle]);
}
int theOtherMiddle = binsearchInc(otherTab, otherBegin, otherEnd, otherValue);
if(b-a > d-c){
bMiddle = theOtherMiddle;
} else{
aMiddle = theOtherMiddle;
}
int2 result;
result.x = aMiddle;
result.y = bMiddle;
return result;
}
__device__ inline void mergeInc(int* listA, int beginA, int endA, int* listB, int beginB, int endB, int* result){
int position = beginA + beginB;
while(beginA < endA && beginB < endB){
if (listA[beginA] < listB[beginB]){
result[position++] = listA[beginA++];
} else{
result[position++] = listB[beginB++];
}
}
while(beginA < endA){
result[position++] = listA[beginA++];
}
while(beginB < endB){
result[position++] = listB[beginB++];
}
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include<cstdio>
#include "hip/hip_vector_types.h"
extern "C" {
__global__ void cuAdd(int* list, int* elements, int i, int listSize){
int thid = blockIdx.x * blockDim.x + threadIdx.x;
while(thid < listSize){
int value = list[thid] + elements[i];
list[thid+listSize] = value;
thid += blockDim.x * gridDim.x;
}
}
__device__ int2 findMedian(int* tabA, int a, int b, int* tabB, int c, int d);
__global__ void cuPartition(int j, int* prevList, int4* H, int size){
int* newList = prevList+size;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadCounts = 1 << (j-1);
while (tid < threadCounts){
int medianId = tid + threadCounts;
int a = H[medianId].x;
int b = H[medianId].y;
int c = H[medianId].z;
int d = H[medianId].w;
int2 ef = findMedian(prevList, a, b, newList, c, d);
H[2*medianId].x = a;
H[2*medianId].y = ef.x;
H[2*medianId].z = c;
H[2*medianId].w = ef.y;
H[2*medianId + 1].x = ef.x;
H[2*medianId + 1].y = b;
H[2*medianId + 1].z = ef.y;
H[2*medianId + 1].w = d;
tid += blockDim.x * gridDim.x;
}
}
__device__ void mergeInc(int* listA, int beginA, int endA, int* listB, int beginB, int endB, int* result);
__global__ void cuMergeIncreasing(int* lists, int4* H, int listSize, int threads, int* result){
int* newList = lists + listSize;
int tid = blockIdx.x * blockDim.x + threadIdx.x + 1;
while(tid <= threads){
int medianId = tid + threads - 1;
int4 localFetch = H[medianId];
int a = localFetch.x;
int b = localFetch.y;
int c = localFetch.z;
int d = localFetch.w;
mergeInc(lists, a, b, newList, c, d, result);
tid += blockDim.x * gridDim.x;
}
}
__global__ void cuPrune(int* listA, int sizeA, int* listB, int sizeB, int* found, int2* pickedBlocks, int* pickedBlocksCounter, int M){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int k = blockDim.x*gridDim.x;
int chunkA = (sizeA + k - 1)/ (k);
int chunkB = (sizeB + k - 1)/ (k);
for(int j = 0; j < k ; j++){
if(*found) return;
int x = listA[tid * chunkA] + listB[(j+1) * chunkB - 1]; // mozemy wyskoczyc jesli chunkA lub ChunbB nie dzieli k
int y = listA[(tid+1) * chunkA - 1] + listB[j * chunkB]; // mozemy wyskoczyc tez
if (x == M || y == M) atomicExch(found, 1);
else if(x < M && y > M){
int pos = atomicAdd(pickedBlocksCounter, 1);
pickedBlocks[pos].x = tid;
pickedBlocks[pos].y = j;
}
}
}
__device__ bool searchSteep(int* listA, int chunkSizeA, int* listB, int chunkSizeB, int M){
int a, b;
a = b = 0;
while(a < chunkSizeA && b < chunkSizeB){
int value = listA[a] + listB[b];
if(value == M) return true;
if(value < M) a++;
else b++;
}
return false;
}
__global__ void cuSearch(int* listA, int sizeA, int* listB, int sizeB, int2* pickedBlocks, int* noPickedBlocks, int* found, int M){
int thid = blockIdx.x * blockDim.x + threadIdx.x;
int k = blockDim.x*gridDim.x;
int chunkA = (sizeA + k - 1)/ (k);
int chunkB = (sizeB + k - 1)/ (k);
while(thid < *noPickedBlocks){
if(*found) return;
int2 idsOfFragmentToCheck = pickedBlocks[thid];
int* shiftedListA = listA + idsOfFragmentToCheck.x * chunkA;
int* shiftedListB = listB + idsOfFragmentToCheck.y * chunkB;
int _sizeA = thid != k-1 ? chunkA : sizeA % chunkA;
int _sizeB = thid != k-1 ? chunkB : sizeB % chunkB;
bool f = searchSteep(shiftedListA, _sizeA, shiftedListB, _sizeB, M);
if(f) *found = true;
thid += k;
}
}
__global__ void cuReverse(int* tab, int size){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid >= size/2)
return;
int tmp = tab[tid];
tab[tid] = tab[size-tid-1];
tab[size-tid-1] = tmp;
}
__device__ int binsearchInc(int* tab, int l, int r, int value){
while(l < r){
int m = (l + r) / 2;
if(tab[m] >= value){
r = m;
} else{
l = m+1;
}
}
return l;
}
__device__ int2 findMedian(int* tabA, int a, int b, int* tabB, int c, int d){
int aMiddle, bMiddle, otherBegin, otherEnd, otherValue;
int* otherTab;
if(b-a > d-c){
aMiddle = (b + a) / 2;
otherTab = tabB;
otherBegin = c;
otherEnd = d;
otherValue = tabA[aMiddle];
//bMiddle = binsearchInc(tabB, c, d, tabA[aMiddle]);
} else{
bMiddle = (c + d) / 2;
otherTab = tabA;
otherBegin = a;
otherEnd = b;
otherValue = tabB[bMiddle];
//aMiddle = binsearchInc(tabA, a, b, tabB[bMiddle]);
}
int theOtherMiddle = binsearchInc(otherTab, otherBegin, otherEnd, otherValue);
if(b-a > d-c){
bMiddle = theOtherMiddle;
} else{
aMiddle = theOtherMiddle;
}
int2 result;
result.x = aMiddle;
result.y = bMiddle;
return result;
}
__device__ inline void mergeInc(int* listA, int beginA, int endA, int* listB, int beginB, int endB, int* result){
int position = beginA + beginB;
while(beginA < endA && beginB < endB){
if (listA[beginA] < listB[beginB]){
result[position++] = listA[beginA++];
} else{
result[position++] = listB[beginB++];
}
}
while(beginA < endA){
result[position++] = listA[beginA++];
}
while(beginB < endB){
result[position++] = listB[beginB++];
}
}
} | .text
.file "kernels.hip"
.globl __device_stub__cuAdd # -- Begin function __device_stub__cuAdd
.p2align 4, 0x90
.type __device_stub__cuAdd,@function
__device_stub__cuAdd: # @__device_stub__cuAdd
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $cuAdd, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size __device_stub__cuAdd, .Lfunc_end0-__device_stub__cuAdd
.cfi_endproc
# -- End function
.globl __device_stub__cuPartition # -- Begin function __device_stub__cuPartition
.p2align 4, 0x90
.type __device_stub__cuPartition,@function
__device_stub__cuPartition: # @__device_stub__cuPartition
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
movl %ecx, 8(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $cuPartition, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size __device_stub__cuPartition, .Lfunc_end1-__device_stub__cuPartition
.cfi_endproc
# -- End function
.globl __device_stub__cuMergeIncreasing # -- Begin function __device_stub__cuMergeIncreasing
.p2align 4, 0x90
.type __device_stub__cuMergeIncreasing,@function
__device_stub__cuMergeIncreasing: # @__device_stub__cuMergeIncreasing
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 4(%rsp)
movl %ecx, (%rsp)
movq %r8, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 4(%rsp), %rax
movq %rax, 96(%rsp)
movq %rsp, %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $cuMergeIncreasing, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size __device_stub__cuMergeIncreasing, .Lfunc_end2-__device_stub__cuMergeIncreasing
.cfi_endproc
# -- End function
.globl __device_stub__cuPrune # -- Begin function __device_stub__cuPrune
.p2align 4, 0x90
.type __device_stub__cuPrune,@function
__device_stub__cuPrune: # @__device_stub__cuPrune
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 88(%rsp)
movl %esi, 12(%rsp)
movq %rdx, 80(%rsp)
movl %ecx, 8(%rsp)
movq %r8, 72(%rsp)
movq %r9, 64(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 12(%rsp), %rax
movq %rax, 104(%rsp)
leaq 80(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 72(%rsp), %rax
movq %rax, 128(%rsp)
leaq 64(%rsp), %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
leaq 184(%rsp), %rax
movq %rax, 152(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $cuPrune, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end3:
.size __device_stub__cuPrune, .Lfunc_end3-__device_stub__cuPrune
.cfi_endproc
# -- End function
.globl __device_stub__cuSearch # -- Begin function __device_stub__cuSearch
.p2align 4, 0x90
.type __device_stub__cuSearch,@function
__device_stub__cuSearch: # @__device_stub__cuSearch
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 88(%rsp)
movl %esi, 12(%rsp)
movq %rdx, 80(%rsp)
movl %ecx, 8(%rsp)
movq %r8, 72(%rsp)
movq %r9, 64(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 12(%rsp), %rax
movq %rax, 104(%rsp)
leaq 80(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 72(%rsp), %rax
movq %rax, 128(%rsp)
leaq 64(%rsp), %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
leaq 184(%rsp), %rax
movq %rax, 152(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $cuSearch, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end4:
.size __device_stub__cuSearch, .Lfunc_end4-__device_stub__cuSearch
.cfi_endproc
# -- End function
.globl __device_stub__cuReverse # -- Begin function __device_stub__cuReverse
.p2align 4, 0x90
.type __device_stub__cuReverse,@function
__device_stub__cuReverse: # @__device_stub__cuReverse
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $cuReverse, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end5:
.size __device_stub__cuReverse, .Lfunc_end5-__device_stub__cuReverse
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB6_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $cuAdd, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $cuPartition, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $cuMergeIncreasing, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $cuPrune, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $cuSearch, %esi
movl $.L__unnamed_5, %edx
movl $.L__unnamed_5, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $cuReverse, %esi
movl $.L__unnamed_6, %edx
movl $.L__unnamed_6, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end6:
.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB7_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB7_2:
retq
.Lfunc_end7:
.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
.cfi_endproc
# -- End function
.type cuAdd,@object # @cuAdd
.section .rodata,"a",@progbits
.globl cuAdd
.p2align 3, 0x0
cuAdd:
.quad __device_stub__cuAdd
.size cuAdd, 8
.type cuPartition,@object # @cuPartition
.globl cuPartition
.p2align 3, 0x0
cuPartition:
.quad __device_stub__cuPartition
.size cuPartition, 8
.type cuMergeIncreasing,@object # @cuMergeIncreasing
.globl cuMergeIncreasing
.p2align 3, 0x0
cuMergeIncreasing:
.quad __device_stub__cuMergeIncreasing
.size cuMergeIncreasing, 8
.type cuPrune,@object # @cuPrune
.globl cuPrune
.p2align 3, 0x0
cuPrune:
.quad __device_stub__cuPrune
.size cuPrune, 8
.type cuSearch,@object # @cuSearch
.globl cuSearch
.p2align 3, 0x0
cuSearch:
.quad __device_stub__cuSearch
.size cuSearch, 8
.type cuReverse,@object # @cuReverse
.globl cuReverse
.p2align 3, 0x0
cuReverse:
.quad __device_stub__cuReverse
.size cuReverse, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "cuAdd"
.size .L__unnamed_1, 6
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "cuPartition"
.size .L__unnamed_2, 12
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "cuMergeIncreasing"
.size .L__unnamed_3, 18
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "cuPrune"
.size .L__unnamed_4, 8
.type .L__unnamed_5,@object # @4
.L__unnamed_5:
.asciz "cuSearch"
.size .L__unnamed_5, 9
.type .L__unnamed_6,@object # @5
.L__unnamed_6:
.asciz "cuReverse"
.size .L__unnamed_6, 10
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__cuAdd
.addrsig_sym __device_stub__cuPartition
.addrsig_sym __device_stub__cuMergeIncreasing
.addrsig_sym __device_stub__cuPrune
.addrsig_sym __device_stub__cuSearch
.addrsig_sym __device_stub__cuReverse
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym cuAdd
.addrsig_sym cuPartition
.addrsig_sym cuMergeIncreasing
.addrsig_sym cuPrune
.addrsig_sym cuSearch
.addrsig_sym cuReverse
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0000c413_00000000-6_kernels.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2063:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl searchSteep
.type searchSteep, @function
searchSteep:
.LFB2057:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2057:
.size searchSteep, .-searchSteep
.globl binsearchInc
.type binsearchInc, @function
binsearchInc:
.LFB2058:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2058:
.size binsearchInc, .-binsearchInc
.globl findMedian
.type findMedian, @function
findMedian:
.LFB2059:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2059:
.size findMedian, .-findMedian
.globl _Z28__device_stub__Z5cuAddPiS_iiPiS_ii
.type _Z28__device_stub__Z5cuAddPiS_iiPiS_ii, @function
_Z28__device_stub__Z5cuAddPiS_iiPiS_ii:
.LFB2085:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L13
.L9:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L14
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L13:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq cuAdd(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L9
.L14:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z28__device_stub__Z5cuAddPiS_iiPiS_ii, .-_Z28__device_stub__Z5cuAddPiS_iiPiS_ii
.globl cuAdd
.type cuAdd, @function
cuAdd:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z28__device_stub__Z5cuAddPiS_iiPiS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size cuAdd, .-cuAdd
.globl _Z39__device_stub__Z11cuPartitioniPiP4int4iiPiP4int4i
.type _Z39__device_stub__Z11cuPartitioniPiP4int4iiPiP4int4i, @function
_Z39__device_stub__Z11cuPartitioniPiP4int4iiPiP4int4i:
.LFB2087:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 24(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 24(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L21
.L17:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L22
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L21:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq cuPartition(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L17
.L22:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2087:
.size _Z39__device_stub__Z11cuPartitioniPiP4int4iiPiP4int4i, .-_Z39__device_stub__Z11cuPartitioniPiP4int4iiPiP4int4i
.globl cuPartition
.type cuPartition, @function
cuPartition:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z39__device_stub__Z11cuPartitioniPiP4int4iiPiP4int4i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size cuPartition, .-cuPartition
.globl _Z47__device_stub__Z17cuMergeIncreasingPiP4int4iiS_PiP4int4iiS_
.type _Z47__device_stub__Z17cuMergeIncreasingPiP4int4iiS_PiP4int4iiS_, @function
_Z47__device_stub__Z17cuMergeIncreasingPiP4int4iiS_PiP4int4iiS_:
.LFB2089:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %r8, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L29
.L25:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L30
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L29:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq cuMergeIncreasing(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L25
.L30:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2089:
.size _Z47__device_stub__Z17cuMergeIncreasingPiP4int4iiS_PiP4int4iiS_, .-_Z47__device_stub__Z17cuMergeIncreasingPiP4int4iiS_PiP4int4iiS_
.globl cuMergeIncreasing
.type cuMergeIncreasing, @function
cuMergeIncreasing:
.LFB2090:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z47__device_stub__Z17cuMergeIncreasingPiP4int4iiS_PiP4int4iiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2090:
.size cuMergeIncreasing, .-cuMergeIncreasing
.globl _Z41__device_stub__Z7cuPrunePiiS_iS_P4int2S_iPiiS_iS_P4int2S_i
.type _Z41__device_stub__Z7cuPrunePiiS_iS_P4int2S_iPiiS_iS_P4int2S_i, @function
_Z41__device_stub__Z7cuPrunePiiS_iS_P4int2S_iPiiS_iS_P4int2S_i:
.LFB2091:
.cfi_startproc
endbr64
subq $200, %rsp
.cfi_def_cfa_offset 208
movq %rdi, 40(%rsp)
movl %esi, 36(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 32(%rsp)
movq %r8, 16(%rsp)
movq %r9, 8(%rsp)
movq 208(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 36(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 32(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
movq %rsp, %rax
movq %rax, 160(%rsp)
leaq 216(%rsp), %rax
movq %rax, 168(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L37
.L33:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L38
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L37:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq cuPrune(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L33
.L38:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2091:
.size _Z41__device_stub__Z7cuPrunePiiS_iS_P4int2S_iPiiS_iS_P4int2S_i, .-_Z41__device_stub__Z7cuPrunePiiS_iS_P4int2S_iPiiS_iS_P4int2S_i
.globl cuPrune
.type cuPrune, @function
cuPrune:
.LFB2092:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z41__device_stub__Z7cuPrunePiiS_iS_P4int2S_iPiiS_iS_P4int2S_i
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2092:
.size cuPrune, .-cuPrune
.globl _Z42__device_stub__Z8cuSearchPiiS_iP4int2S_S_iPiiS_iP4int2S_S_i
.type _Z42__device_stub__Z8cuSearchPiiS_iP4int2S_S_iPiiS_iP4int2S_S_i, @function
_Z42__device_stub__Z8cuSearchPiiS_iP4int2S_S_iPiiS_iP4int2S_S_i:
.LFB2093:
.cfi_startproc
endbr64
subq $200, %rsp
.cfi_def_cfa_offset 208
movq %rdi, 40(%rsp)
movl %esi, 36(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 32(%rsp)
movq %r8, 16(%rsp)
movq %r9, 8(%rsp)
movq 208(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 36(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 32(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
movq %rsp, %rax
movq %rax, 160(%rsp)
leaq 216(%rsp), %rax
movq %rax, 168(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L45
.L41:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L46
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L45:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq cuSearch(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L41
.L46:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2093:
.size _Z42__device_stub__Z8cuSearchPiiS_iP4int2S_S_iPiiS_iP4int2S_S_i, .-_Z42__device_stub__Z8cuSearchPiiS_iP4int2S_S_iPiiS_iP4int2S_S_i
.globl cuSearch
.type cuSearch, @function
cuSearch:
.LFB2094:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z42__device_stub__Z8cuSearchPiiS_iP4int2S_S_iPiiS_iP4int2S_S_i
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2094:
.size cuSearch, .-cuSearch
.globl _Z29__device_stub__Z9cuReversePiiPii
.type _Z29__device_stub__Z9cuReversePiiPii, @function
_Z29__device_stub__Z9cuReversePiiPii:
.LFB2095:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L53
.L49:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L54
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L53:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq cuReverse(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L49
.L54:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2095:
.size _Z29__device_stub__Z9cuReversePiiPii, .-_Z29__device_stub__Z9cuReversePiiPii
.globl cuReverse
.type cuReverse, @function
cuReverse:
.LFB2096:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z9cuReversePiiPii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2096:
.size cuReverse, .-cuReverse
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "cuReverse"
.LC1:
.string "cuSearch"
.LC2:
.string "cuPrune"
.LC3:
.string "cuMergeIncreasing"
.LC4:
.string "cuPartition"
.LC5:
.string "cuAdd"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2098:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq cuReverse(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq cuSearch(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq cuPrune(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq cuMergeIncreasing(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq cuPartition(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq cuAdd(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2098:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "kernels.hip"
.globl __device_stub__cuAdd # -- Begin function __device_stub__cuAdd
.p2align 4, 0x90
.type __device_stub__cuAdd,@function
__device_stub__cuAdd: # @__device_stub__cuAdd
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $cuAdd, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size __device_stub__cuAdd, .Lfunc_end0-__device_stub__cuAdd
.cfi_endproc
# -- End function
.globl __device_stub__cuPartition # -- Begin function __device_stub__cuPartition
.p2align 4, 0x90
.type __device_stub__cuPartition,@function
__device_stub__cuPartition: # @__device_stub__cuPartition
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
movl %ecx, 8(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $cuPartition, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size __device_stub__cuPartition, .Lfunc_end1-__device_stub__cuPartition
.cfi_endproc
# -- End function
.globl __device_stub__cuMergeIncreasing # -- Begin function __device_stub__cuMergeIncreasing
.p2align 4, 0x90
.type __device_stub__cuMergeIncreasing,@function
__device_stub__cuMergeIncreasing: # @__device_stub__cuMergeIncreasing
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 4(%rsp)
movl %ecx, (%rsp)
movq %r8, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 4(%rsp), %rax
movq %rax, 96(%rsp)
movq %rsp, %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $cuMergeIncreasing, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size __device_stub__cuMergeIncreasing, .Lfunc_end2-__device_stub__cuMergeIncreasing
.cfi_endproc
# -- End function
.globl __device_stub__cuPrune # -- Begin function __device_stub__cuPrune
.p2align 4, 0x90
.type __device_stub__cuPrune,@function
__device_stub__cuPrune: # @__device_stub__cuPrune
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 88(%rsp)
movl %esi, 12(%rsp)
movq %rdx, 80(%rsp)
movl %ecx, 8(%rsp)
movq %r8, 72(%rsp)
movq %r9, 64(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 12(%rsp), %rax
movq %rax, 104(%rsp)
leaq 80(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 72(%rsp), %rax
movq %rax, 128(%rsp)
leaq 64(%rsp), %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
leaq 184(%rsp), %rax
movq %rax, 152(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $cuPrune, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end3:
.size __device_stub__cuPrune, .Lfunc_end3-__device_stub__cuPrune
.cfi_endproc
# -- End function
.globl __device_stub__cuSearch # -- Begin function __device_stub__cuSearch
.p2align 4, 0x90
.type __device_stub__cuSearch,@function
# Compiler-generated host-side launch stub for the cuSearch kernel.
# Identical layout to __device_stub__cuPrune: spill the six register
# arguments, build the argument-pointer table at 96(%rsp) (including the
# addresses of two caller-stack slots at 176/184(%rsp)), pop the recorded
# launch configuration, and dispatch through hipLaunchKernel.
__device_stub__cuSearch: # @__device_stub__cuSearch
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
# Spill incoming arguments so their addresses can be taken.
movq %rdi, 88(%rsp)
movl %esi, 12(%rsp)
movq %rdx, 80(%rsp)
movl %ecx, 8(%rsp)
movq %r8, 72(%rsp)
movq %r9, 64(%rsp)
# Kernel-argument pointer array at 96(%rsp).
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 12(%rsp), %rax
movq %rax, 104(%rsp)
leaq 80(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 72(%rsp), %rax
movq %rax, 128(%rsp)
leaq 64(%rsp), %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
leaq 184(%rsp), %rax
movq %rax, 152(%rsp)
# Retrieve the launch configuration pushed by the <<<...>>> expansion.
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $cuSearch, %edi
# Shared-memory size and stream go on the stack for hipLaunchKernel.
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
# 168-byte frame + 16 bytes of pushes = 184 bytes to release.
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end4:
.size __device_stub__cuSearch, .Lfunc_end4-__device_stub__cuSearch
.cfi_endproc
# -- End function
.globl __device_stub__cuReverse # -- Begin function __device_stub__cuReverse
.p2align 4, 0x90
.type __device_stub__cuReverse,@function
# Compiler-generated host-side launch stub for the cuReverse kernel.
# This kernel takes only two arguments (one pointer in rdi, one int in esi),
# so the frame and argument table are smaller than the other stubs, but the
# flow is the same: spill args, build the two-entry argument-pointer table,
# pop the launch configuration, and call hipLaunchKernel.
__device_stub__cuReverse: # @__device_stub__cuReverse
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
# Spill the two incoming arguments so their addresses can be taken.
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
# Two-entry kernel-argument pointer array at 64(%rsp).
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
# Retrieve the launch configuration pushed by the <<<...>>> expansion.
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $cuReverse, %edi
# Shared-memory size and stream go on the stack for hipLaunchKernel.
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
# 88-byte frame + 16 bytes of pushes = 104 bytes to release.
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end5:
.size __device_stub__cuReverse, .Lfunc_end5-__device_stub__cuReverse
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
# Module constructor (runs from .init_array before main).  Registers the
# embedded GPU fat binary exactly once — guarded by the __hip_gpubin_handle
# null check — then registers each of the six kernels (cuAdd, cuPartition,
# cuMergeIncreasing, cuPrune, cuSearch, cuReverse) with the HIP runtime via
# __hipRegisterFunction, pairing the host stub address with the kernel's
# mangled name string.  Finally tail-calls atexit to install
# __hip_module_dtor for teardown.
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
# Register the fat binary only on the first invocation.
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB6_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
movq __hip_gpubin_handle(%rip), %rbx
# The 32 zeroed stack bytes are the four trailing (unused) pointer
# parameters of __hipRegisterFunction, re-zeroed before each call.
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $cuAdd, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $cuPartition, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $cuMergeIncreasing, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $cuPrune, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $cuSearch, %esi
movl $.L__unnamed_5, %edx
movl $.L__unnamed_5, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $cuReverse, %esi
movl $.L__unnamed_6, %edx
movl $.L__unnamed_6, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
# Install the destructor via atexit (tail call; atexit's return value
# becomes this function's return value).
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end6:
.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
# Module destructor (installed via atexit by __hip_module_ctor).
# If the fat binary was registered, unregister it and null the handle so
# the teardown is idempotent; otherwise return immediately.
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB7_2
# %bb.1:
# pushq %rax only realigns the stack to 16 bytes for the call.
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB7_2:
retq
.Lfunc_end7:
.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
.cfi_endproc
# -- End function
# ---------------------------------------------------------------------------
# Kernel handle objects: each global symbol named after a kernel holds the
# address of its host launch stub.  These are the values passed as the
# "function" argument to __hipRegisterFunction / hipLaunchKernel above.
# ---------------------------------------------------------------------------
.type cuAdd,@object # @cuAdd
.section .rodata,"a",@progbits
.globl cuAdd
.p2align 3, 0x0
cuAdd:
.quad __device_stub__cuAdd
.size cuAdd, 8
.type cuPartition,@object # @cuPartition
.globl cuPartition
.p2align 3, 0x0
cuPartition:
.quad __device_stub__cuPartition
.size cuPartition, 8
.type cuMergeIncreasing,@object # @cuMergeIncreasing
.globl cuMergeIncreasing
.p2align 3, 0x0
cuMergeIncreasing:
.quad __device_stub__cuMergeIncreasing
.size cuMergeIncreasing, 8
.type cuPrune,@object # @cuPrune
.globl cuPrune
.p2align 3, 0x0
cuPrune:
.quad __device_stub__cuPrune
.size cuPrune, 8
.type cuSearch,@object # @cuSearch
.globl cuSearch
.p2align 3, 0x0
cuSearch:
.quad __device_stub__cuSearch
.size cuSearch, 8
.type cuReverse,@object # @cuReverse
.globl cuReverse
.p2align 3, 0x0
cuReverse:
.quad __device_stub__cuReverse
.size cuReverse, 8
# Kernel name strings used for runtime symbol lookup during registration.
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "cuAdd"
.size .L__unnamed_1, 6
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "cuPartition"
.size .L__unnamed_2, 12
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "cuMergeIncreasing"
.size .L__unnamed_3, 18
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "cuPrune"
.size .L__unnamed_4, 8
.type .L__unnamed_5,@object # @4
.L__unnamed_5:
.asciz "cuSearch"
.size .L__unnamed_5, 9
.type .L__unnamed_6,@object # @5
.L__unnamed_6:
.asciz "cuReverse"
.size .L__unnamed_6, 10
# Fat binary wrapper: magic 0x48495046 ("HIPF"... wait — bytes spell "FPIH"
# little-endian; value matches the HIP fat-binary wrapper magic), version,
# pointer to the embedded device code blob, reserved field.
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
# Once-only registration handle written by __hip_module_ctor.
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
# Run __hip_module_ctor before main.
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__cuAdd
.addrsig_sym __device_stub__cuPartition
.addrsig_sym __device_stub__cuMergeIncreasing
.addrsig_sym __device_stub__cuPrune
.addrsig_sym __device_stub__cuSearch
.addrsig_sym __device_stub__cuReverse
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym cuAdd
.addrsig_sym cuPartition
.addrsig_sym cuMergeIncreasing
.addrsig_sym cuPrune
.addrsig_sym cuSearch
.addrsig_sym cuReverse
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | //-------------------------------------GPU Implementation of KNN--------------------------------------------------
//---------------------------Train Data store in input.txt and Test data in test.txt------------------------------
#include<iostream>
#include<thrust/host_vector.h>
#include<thrust/device_vector.h>
#include<stdlib.h>
#include<stdio.h>
#include<thrust/sort.h>
#include<math.h>
#include<cuda.h>
using namespace std;
// Calculating distance in parallel for one test point and all training point
// Kernal launched with 1*n threads
// Distance from one test point (gquery) to every training point (gdata).
// Launch layout: <<<1, N>>> — one thread per training point.
// gdata  : N x count row-major training matrix (device)
// gquery : count-element test point (device)
// gres   : out, N Euclidean distances (device)
// gid    : out, identity permutation 0..N-1, later sorted by distance (device)
// NOTE: the loop starts at i=1, skipping column 0 — preserved from the
// original implementation; presumably column 0 is not a feature (confirm
// against the input file format).
__global__ void k1(float *gdata,float *gquery,float *gres,int *gid,int N,int count) {
    int id = threadIdx.x;
    if (id >= N) return;               // guard against over-sized launches
    gid[id] = id;
    float dist = 0.0f;
    for (int i = 1; i < count; i++) {
        float diff = gdata[id*count+i] - gquery[i];
        dist += diff * diff;
    }
    // sqrtf keeps the computation in single precision; the original sqrt()
    // silently promoted to double.
    gres[id] = sqrtf(dist);
}
/*__global__ void k(float *data,int N,int count){
for(int j=0;j<count;j++){
printf("%d\n",data[threadIdx.x*count+j]);
}
}*/
//Calculating distances in parallel between all train point and test point .
//kernal launched with m*n threads
// Distances between every (test point, training point) pair.
// Launch layout: <<<m, N>>> — flat thread id encodes the pair:
//   i = id / N : test point index,  j = id % N : training point index.
// BUGFIX: the original accumulated with atomicAdd(&dis[id], ...) into
// memory that cudaMalloc never zeroes, so results included garbage.  Each
// thread owns a unique dis[id], so the sum belongs in a register: this
// fixes the uninitialized-read bug and removes the needless atomics.
__global__ void maxkernal(float *data,float *query,float *dis,int *gid,int N,int count){
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    int i = id/N;                      // test point index
    int j = id%N;                      // training point index
    float sum = 0.0f;
    // Column 0 skipped, matching k1 (presumably not a feature — confirm).
    for(int k=1;k<count;k++){
        float diff = data[j*count+k]-query[i*count+k];
        sum += diff*diff;
    }
    gid[id] = id;
    gid[id] = id;                      // flat pair id; caller maps back with % N
    dis[id] = sqrtf(sum);              // sqrtf: stay in single precision
}
// Accuracy calculation in parallel
// Counts matching predictions in parallel: one thread per test sample.
// s1      : ground-truth class ids (device)
// s2      : predicted class ids (device)
// counter : single-element accumulator, incremented atomically per match.
__global__ void Accuracy(int *s1,int *s2,int *counter){
    int tid = threadIdx.x;
    bool match = (s1[tid] == s2[tid]);
    if (match) {
        atomicAdd(counter, 1);
    }
}
// Begin of the main function
// KNN on the Iris data set (CUDA).  Reads training points from input.txt
// and test points from test.txt, classifies each test point two ways
// (one kernel launch per query, then one m*N-thread launch for all
// queries), reports accuracy and timing, and predicts a random point.
int main(){
    // ---------------- Train data ----------------
    int k=15;                      // neighbours that vote
    int N=135;                     // number of training points
    int count=0;                   // fields per record (from header scan)
    FILE *fp;
    string s[N];                   // class label of each training point
    fp = fopen("input.txt","r");
    char ch = ' ';
    // The comma count of the first line fixes the number of columns.
    while(ch!='\n'){
        ch = getc(fp);
        if(ch==','){
            count++;
        }
    }
    float *data = (float *)malloc(N*count*sizeof(float));
    for(int i=0;i<N;i++){
        for(int j=0;j<count;j++){
            fscanf(fp,"%f",&data[i*count+j]);
            ch = fgetc(fp);        // consume separator
        }
        char c;
        c = fgetc(fp);
        while(c!='\n'){            // trailing string field = class label
            s[i] += c;
            c = fgetc(fp);
        }
    }
    fclose(fp);
    float *gdata,*gres,*res;
    int *id,*gid;
    int *fclass;
    // ---------------- Test data + per-query KNN ----------------
    FILE *op;
    int m=15;                      // number of test points
    string s1[m];
    int gsres[m];                  // predicted class per test point
    float *query,*gquery;
    float *query2d = (float *)malloc(m*count*sizeof(float));
    fclass = (int *)malloc(m*sizeof(int));
    op = fopen("test.txt","r");
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float ms = 0;
    // BUGFIX: all of these buffers are loop-invariant.  The original code
    // re-allocated every one of them (and re-uploaded the training set)
    // once per test point and never freed the previous allocation, leaking
    // host and device memory each iteration.  Allocate once, reuse.
    query = (float *)malloc(count*sizeof(float));
    res = (float *)malloc(N*sizeof(float));
    id = (int *)malloc(N*sizeof(int));
    cudaMalloc(&gquery,count*sizeof(float));
    cudaMalloc(&gdata,N*count*sizeof(float));
    cudaMalloc(&gres,N*sizeof(float));
    cudaMalloc(&gid,N*sizeof(int));
    cudaMemcpy(gdata,data,N*count*sizeof(float),cudaMemcpyHostToDevice);
    for(int i=0;i<m;i++){
        for(int j=0;j<count;j++){
            fscanf(op,"%f",&query[j]);
            query2d[i*count+j] = query[j];   // also kept for the m*N version
            ch = fgetc(op);
        }
        char c;
        c = fgetc(op);
        while(c!='\n'){
            s1[i] += c;
            c = fgetc(op);
        }
        // Encode the ground-truth label as an integer class id.
        if(s1[i]=="Iris-setosa"){
            fclass[i] = 1;
        }
        if(s1[i]=="Iris-versicolor"){
            fclass[i] = 2;
        }
        if(s1[i]=="Iris-virginica"){
            fclass[i] = 3;
        }
        float milliseconds = 0;
        cudaEventRecord(start,0);
        cudaMemcpy(gquery,query,count*sizeof(float),cudaMemcpyHostToDevice);
        // One thread per training point computes its distance to this query.
        k1<<<1,N>>>(gdata,gquery,gres,gid,N,count);
        cudaMemcpy(res,gres,N*sizeof(float),cudaMemcpyDeviceToHost);
        cudaMemcpy(id,gid,N*sizeof(int),cudaMemcpyDeviceToHost);
        cudaEventRecord(stop,0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&milliseconds, start, stop);
        ms += milliseconds;
        thrust::sort_by_key(res, res + N, id);   // host-side sort by distance
        int count1,count2,count3;
        count1 = count2 = count3 = 0;
        // Vote among the k closest neighbours.
        for(int j=0;j<k;j++){
            if(s[id[j]]=="Iris-setosa"){
                count1++;
            }
            if(s[id[j]]=="Iris-versicolor"){
                count2++;
            }
            if(s[id[j]]=="Iris-virginica"){
                count3++;
            }
        }
        if(count1>count2){
            if(count1>count3){
                gsres[i] = 1;
            }
            else{
                gsres[i] = 3;
            }
        }
        else{
            if(count2>count3){
                gsres[i] = 2;
            }
            else{
                gsres[i] = 3;
            }
        }
    }
    fclose(op);   // BUGFIX: the test file was never closed
    // ---------------- Accuracy of the per-query implementation ----------------
    int *gclass,*ggsres,*gcounter;
    int counter[1];
    counter[0] = 0;
    cudaMalloc(&gclass,m*sizeof(int));
    cudaMalloc(&ggsres,m*sizeof(int));
    cudaMalloc(&gcounter,1*sizeof(int));
    cudaMemcpy(gclass,fclass,m*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(ggsres,gsres,m*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(gcounter,counter,1*sizeof(int),cudaMemcpyHostToDevice);
    Accuracy<<<1,m>>>(gclass,ggsres,gcounter);
    cudaMemcpy(counter,gcounter,1*sizeof(int),cudaMemcpyDeviceToHost);
    float acc = counter[0]*100;
    acc = acc/m;
    printf("Basic KNN Time taken in %f millisecond\n",ms);
    cout<<"Accuracy of KNN "<<acc<<"%"<<"\n";
    // ---------------- Prediction for one random point ----------------
    srand(time(0));
    float *points = (float *)malloc(count*sizeof(float));
    for(int j=0;j<count;j++){
        if(j<count-1){
            points[j] = rand()%8;
        }
        else{
            points[j] = rand()%3;
        }
    }
    cout<<"\n";
    float *dis,*ggdata;
    float *gpoint,*gdis;
    int *gidd;
    int *idd;
    cudaMalloc(&gpoint,count*sizeof(float));
    cudaMalloc(&ggdata,N*count*sizeof(float));
    cudaMalloc(&gdis,N*sizeof(float));
    cudaMalloc(&gidd,N*sizeof(int));
    dis = (float *)malloc(N*sizeof(float));
    idd = (int *)malloc(N*sizeof(int));
    cudaMemcpy(ggdata,data,N*count*sizeof(float),cudaMemcpyHostToDevice);
    cudaMemcpy(gpoint,points,count*sizeof(float),cudaMemcpyHostToDevice);
    // BUGFIX: the original launched with gdata even though the training set
    // had just been uploaded into ggdata.
    k1<<<1,N>>>(ggdata,gpoint,gdis,gidd,N,count);
    cudaMemcpy(dis,gdis,N*sizeof(float),cudaMemcpyDeviceToHost);
    cudaMemcpy(idd,gidd,N*sizeof(int),cudaMemcpyDeviceToHost);
    thrust::sort_by_key(dis, dis + N, idd);
    int count1,count2,count3;
    count1 = count2 = count3 = 0;
    for(int i=0;i<k;i++){
        if(s[idd[i]]=="Iris-setosa"){
            count1++;
        }
        if(s[idd[i]]=="Iris-versicolor"){
            count2++;
        }
        if(s[idd[i]]=="Iris-virginica"){
            count3++;
        }
    }
    string prediction;
    if(count1>count2){
        if(count1>count3){
            prediction = "Iris-setosa";
        }
        else{
            prediction = "Iris-virginica";
        }
    }
    else{
        if(count2>count3){
            prediction = "Iris-versicolor";
        }
        else{
            prediction = "Iris-virginica";
        }
    }
    cout<<"prediction Result "<<prediction<<"\n";
    // ---------------- Fully parallel (m*N threads) implementation ----------------
    cudaEvent_t start1, stop1;
    cudaEventCreate(&start1);
    cudaEventCreate(&stop1);
    float milliseconds1 = 0;
    cudaEventRecord(start1,0);
    int *id2d,*gid2d;
    int *mres = (int *)malloc(m*sizeof(int));
    float *gquery2d,*gdatam,*gdist,*dist;
    cudaMalloc(&gquery2d,m*count*sizeof(float));
    cudaMemcpy(gquery2d,query2d,m*count*sizeof(float),cudaMemcpyHostToDevice);
    cudaMalloc(&gdatam,N*count*sizeof(float));
    cudaMemcpy(gdatam,data,N*count*sizeof(float),cudaMemcpyHostToDevice);
    dist = (float *)malloc(m*N*sizeof(float));
    cudaMalloc(&gdist,m*N*sizeof(float));
    // BUGFIX: maxkernal accumulates into gdist, but cudaMalloc does not zero
    // memory — clear the buffer before the launch.
    cudaMemset(gdist,0,m*N*sizeof(float));
    id2d = (int *)malloc(m*N*sizeof(int));
    cudaMalloc(&gid2d,m*N*sizeof(int));
    // One thread per (test, train) pair.
    maxkernal<<<m,N>>>(gdatam,gquery2d,gdist,gid2d,N,count);
    cudaMemcpy(dist,gdist,m*N*sizeof(float),cudaMemcpyDeviceToHost);
    cudaMemcpy(id2d,gid2d,m*N*sizeof(int),cudaMemcpyDeviceToHost);
    cudaEventRecord(stop1,0);
    cudaEventSynchronize(stop1);
    cudaEventElapsedTime(&milliseconds1, start1, stop1);
    for(int i=0;i<m;i++){
        float *distance = (float *)malloc(N*sizeof(float));
        int *index = (int *)malloc(N*sizeof(int));
        for(int j=0;j<N;j++){
            distance[j] = dist[i*N+j];
            index[j] = id2d[i*N+j];
        }
        thrust::sort_by_key(distance, distance + N, index);
        int count1,count2,count3;
        count1 = count2 = count3 = 0;
        for(int j=0;j<k;j++){
            int p = index[j]%N;    // map flat pair id back to training index
            if(s[p]=="Iris-setosa"){
                count1++;
            }
            if(s[p]=="Iris-versicolor"){
                count2++;
            }
            if(s[p]=="Iris-virginica"){
                count3++;
            }
        }
        if(count1>count2){
            if(count1>count3){
                mres[i] = 1;
            }
            else{
                mres[i] = 3;
            }
        }
        else{
            if(count2>count3){
                mres[i] = 2;
            }
            else{
                mres[i] = 3;
            }
        }
        // BUGFIX: these per-iteration scratch buffers were leaked.
        free(distance);
        free(index);
    }
    // ---------------- Accuracy of the parallel implementation ----------------
    int *ggclass,*gggsres,*ggcounter;
    int ccounter[1];
    ccounter[0] = 0;
    cudaMalloc(&ggclass,m*sizeof(int));
    cudaMalloc(&gggsres,m*sizeof(int));
    cudaMalloc(&ggcounter,1*sizeof(int));
    cudaMemcpy(ggclass,fclass,m*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(gggsres,mres,m*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(ggcounter,ccounter,1*sizeof(int),cudaMemcpyHostToDevice);
    Accuracy<<<1,m>>>(ggclass,gggsres,ggcounter);
    cudaMemcpy(ccounter,ggcounter,1*sizeof(int),cudaMemcpyDeviceToHost);
    float aacc = ccounter[0]*100;
    aacc = aacc/m;
    printf("Time taken %f\n",milliseconds1);
    // BUGFIX: this line printed the first implementation's accuracy (acc);
    // it must report the parallel implementation's (aacc).
    cout<<"Accuracy of KNN after Max Parallelism "<<aacc<<"%"<<"\n";
    // Free GPU buffers.
    cudaFree(ggclass);
    cudaFree(gggsres);
    cudaFree(ggcounter);
    cudaFree(gquery2d);
    cudaFree(gdatam);
    cudaFree(gdis);
    cudaFree(gdist);
    cudaFree(gid);
    cudaFree(gid2d);
    cudaFree(gpoint);
    cudaFree(gquery);
    cudaFree(gdata);
    cudaFree(gcounter);
    cudaFree(gclass);
    // BUGFIX: the original called cudaFree(gsres) — gsres is a host stack
    // array, not a device allocation — while leaking the ggsres device
    // buffer it actually allocated.
    cudaFree(ggsres);
    cudaFree(gres);
    cudaFree(gidd);
    cudaFree(ggdata);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaEventDestroy(start1);
    cudaEventDestroy(stop1);
    // Free host buffers.
    free(data);
    free(fclass);
    free(res);
    free(id);
    free(query);
    free(query2d);
    free(points);
    free(idd);
    free(dis);
    free(id2d);
    free(mres);
    free(dist);
    cudaDeviceSynchronize();
    return 0;
}
//---------------------------Train Data store in input.txt and Test data in test.txt------------------------------
#include<iostream>
#include<thrust/host_vector.h>
#include<thrust/device_vector.h>
#include<stdlib.h>
#include<stdio.h>
#include<thrust/sort.h>
#include<math.h>
#include<hip/hip_runtime.h>
using namespace std;
// Calculating distance in parallel for one test point and all training point
// Kernal launched with 1*n threads
// Distance from one test point (gquery) to every training point (gdata).
// Launch layout: <<<1, N>>> — one thread per training point.
// gdata  : N x count row-major training matrix (device)
// gquery : count-element test point (device)
// gres   : out, N Euclidean distances (device)
// gid    : out, identity permutation 0..N-1, later sorted by distance (device)
// NOTE: the loop starts at i=1, skipping column 0 — preserved from the
// original implementation; presumably column 0 is not a feature (confirm
// against the input file format).
__global__ void k1(float *gdata,float *gquery,float *gres,int *gid,int N,int count) {
    int id = threadIdx.x;
    if (id >= N) return;               // guard against over-sized launches
    gid[id] = id;
    float dist = 0.0f;
    for (int i = 1; i < count; i++) {
        float diff = gdata[id*count+i] - gquery[i];
        dist += diff * diff;
    }
    // sqrtf keeps the computation in single precision; the original sqrt()
    // silently promoted to double.
    gres[id] = sqrtf(dist);
}
/*__global__ void k(float *data,int N,int count){
for(int j=0;j<count;j++){
printf("%d\n",data[threadIdx.x*count+j]);
}
}*/
//Calculating distances in parallel between all train point and test point .
//kernal launched with m*n threads
// Distances between every (test point, training point) pair.
// Launch layout: <<<m, N>>> — flat thread id encodes the pair:
//   i = id / N : test point index,  j = id % N : training point index.
// BUGFIX: the original accumulated with atomicAdd(&dis[id], ...) into
// memory that hipMalloc never zeroes, so results included garbage.  Each
// thread owns a unique dis[id], so the sum belongs in a register: this
// fixes the uninitialized-read bug and removes the needless atomics.
__global__ void maxkernal(float *data,float *query,float *dis,int *gid,int N,int count){
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    int i = id/N;                      // test point index
    int j = id%N;                      // training point index
    float sum = 0.0f;
    // Column 0 skipped, matching k1 (presumably not a feature — confirm).
    for(int k=1;k<count;k++){
        float diff = data[j*count+k]-query[i*count+k];
        sum += diff*diff;
    }
    gid[id] = id;                      // flat pair id; caller maps back with % N
    dis[id] = sqrtf(sum);              // sqrtf: stay in single precision
}
// Accuracy calculation in parallel
// Counts matching predictions in parallel: one thread per test sample.
// s1      : ground-truth class ids (device)
// s2      : predicted class ids (device)
// counter : single-element accumulator, incremented atomically per match.
__global__ void Accuracy(int *s1,int *s2,int *counter){
    int tid = threadIdx.x;
    bool match = (s1[tid] == s2[tid]);
    if (match) {
        atomicAdd(counter, 1);
    }
}
// Begin of the main function
// KNN on the Iris data set (HIP).  Reads training points from input.txt
// and test points from test.txt, classifies each test point two ways
// (one kernel launch per query, then one m*N-thread launch for all
// queries), reports accuracy and timing, and predicts a random point.
int main(){
    // ---------------- Train data ----------------
    int k=15;                      // neighbours that vote
    int N=135;                     // number of training points
    int count=0;                   // fields per record (from header scan)
    FILE *fp;
    string s[N];                   // class label of each training point
    fp = fopen("input.txt","r");
    char ch = ' ';
    // The comma count of the first line fixes the number of columns.
    while(ch!='\n'){
        ch = getc(fp);
        if(ch==','){
            count++;
        }
    }
    float *data = (float *)malloc(N*count*sizeof(float));
    for(int i=0;i<N;i++){
        for(int j=0;j<count;j++){
            fscanf(fp,"%f",&data[i*count+j]);
            ch = fgetc(fp);        // consume separator
        }
        char c;
        c = fgetc(fp);
        while(c!='\n'){            // trailing string field = class label
            s[i] += c;
            c = fgetc(fp);
        }
    }
    fclose(fp);
    float *gdata,*gres,*res;
    int *id,*gid;
    int *fclass;
    // ---------------- Test data + per-query KNN ----------------
    FILE *op;
    int m=15;                      // number of test points
    string s1[m];
    int gsres[m];                  // predicted class per test point
    float *query,*gquery;
    float *query2d = (float *)malloc(m*count*sizeof(float));
    fclass = (int *)malloc(m*sizeof(int));
    op = fopen("test.txt","r");
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    float ms = 0;
    // BUGFIX: all of these buffers are loop-invariant.  The original code
    // re-allocated every one of them (and re-uploaded the training set)
    // once per test point and never freed the previous allocation, leaking
    // host and device memory each iteration.  Allocate once, reuse.
    query = (float *)malloc(count*sizeof(float));
    res = (float *)malloc(N*sizeof(float));
    id = (int *)malloc(N*sizeof(int));
    hipMalloc(&gquery,count*sizeof(float));
    hipMalloc(&gdata,N*count*sizeof(float));
    hipMalloc(&gres,N*sizeof(float));
    hipMalloc(&gid,N*sizeof(int));
    hipMemcpy(gdata,data,N*count*sizeof(float),hipMemcpyHostToDevice);
    for(int i=0;i<m;i++){
        for(int j=0;j<count;j++){
            fscanf(op,"%f",&query[j]);
            query2d[i*count+j] = query[j];   // also kept for the m*N version
            ch = fgetc(op);
        }
        char c;
        c = fgetc(op);
        while(c!='\n'){
            s1[i] += c;
            c = fgetc(op);
        }
        // Encode the ground-truth label as an integer class id.
        if(s1[i]=="Iris-setosa"){
            fclass[i] = 1;
        }
        if(s1[i]=="Iris-versicolor"){
            fclass[i] = 2;
        }
        if(s1[i]=="Iris-virginica"){
            fclass[i] = 3;
        }
        float milliseconds = 0;
        hipEventRecord(start,0);
        hipMemcpy(gquery,query,count*sizeof(float),hipMemcpyHostToDevice);
        // One thread per training point computes its distance to this query.
        k1<<<1,N>>>(gdata,gquery,gres,gid,N,count);
        hipMemcpy(res,gres,N*sizeof(float),hipMemcpyDeviceToHost);
        hipMemcpy(id,gid,N*sizeof(int),hipMemcpyDeviceToHost);
        hipEventRecord(stop,0);
        hipEventSynchronize(stop);
        hipEventElapsedTime(&milliseconds, start, stop);
        ms += milliseconds;
        thrust::sort_by_key(res, res + N, id);   // host-side sort by distance
        int count1,count2,count3;
        count1 = count2 = count3 = 0;
        // Vote among the k closest neighbours.
        for(int j=0;j<k;j++){
            if(s[id[j]]=="Iris-setosa"){
                count1++;
            }
            if(s[id[j]]=="Iris-versicolor"){
                count2++;
            }
            if(s[id[j]]=="Iris-virginica"){
                count3++;
            }
        }
        if(count1>count2){
            if(count1>count3){
                gsres[i] = 1;
            }
            else{
                gsres[i] = 3;
            }
        }
        else{
            if(count2>count3){
                gsres[i] = 2;
            }
            else{
                gsres[i] = 3;
            }
        }
    }
    fclose(op);   // BUGFIX: the test file was never closed
    // ---------------- Accuracy of the per-query implementation ----------------
    int *gclass,*ggsres,*gcounter;
    int counter[1];
    counter[0] = 0;
    hipMalloc(&gclass,m*sizeof(int));
    hipMalloc(&ggsres,m*sizeof(int));
    hipMalloc(&gcounter,1*sizeof(int));
    hipMemcpy(gclass,fclass,m*sizeof(int),hipMemcpyHostToDevice);
    hipMemcpy(ggsres,gsres,m*sizeof(int),hipMemcpyHostToDevice);
    hipMemcpy(gcounter,counter,1*sizeof(int),hipMemcpyHostToDevice);
    Accuracy<<<1,m>>>(gclass,ggsres,gcounter);
    hipMemcpy(counter,gcounter,1*sizeof(int),hipMemcpyDeviceToHost);
    float acc = counter[0]*100;
    acc = acc/m;
    printf("Basic KNN Time taken in %f millisecond\n",ms);
    cout<<"Accuracy of KNN "<<acc<<"%"<<"\n";
    // ---------------- Prediction for one random point ----------------
    srand(time(0));
    float *points = (float *)malloc(count*sizeof(float));
    for(int j=0;j<count;j++){
        if(j<count-1){
            points[j] = rand()%8;
        }
        else{
            points[j] = rand()%3;
        }
    }
    cout<<"\n";
    float *dis,*ggdata;
    float *gpoint,*gdis;
    int *gidd;
    int *idd;
    hipMalloc(&gpoint,count*sizeof(float));
    hipMalloc(&ggdata,N*count*sizeof(float));
    hipMalloc(&gdis,N*sizeof(float));
    hipMalloc(&gidd,N*sizeof(int));
    dis = (float *)malloc(N*sizeof(float));
    idd = (int *)malloc(N*sizeof(int));
    hipMemcpy(ggdata,data,N*count*sizeof(float),hipMemcpyHostToDevice);
    hipMemcpy(gpoint,points,count*sizeof(float),hipMemcpyHostToDevice);
    // BUGFIX: the original launched with gdata even though the training set
    // had just been uploaded into ggdata.
    k1<<<1,N>>>(ggdata,gpoint,gdis,gidd,N,count);
    hipMemcpy(dis,gdis,N*sizeof(float),hipMemcpyDeviceToHost);
    hipMemcpy(idd,gidd,N*sizeof(int),hipMemcpyDeviceToHost);
    thrust::sort_by_key(dis, dis + N, idd);
    int count1,count2,count3;
    count1 = count2 = count3 = 0;
    for(int i=0;i<k;i++){
        if(s[idd[i]]=="Iris-setosa"){
            count1++;
        }
        if(s[idd[i]]=="Iris-versicolor"){
            count2++;
        }
        if(s[idd[i]]=="Iris-virginica"){
            count3++;
        }
    }
    string prediction;
    if(count1>count2){
        if(count1>count3){
            prediction = "Iris-setosa";
        }
        else{
            prediction = "Iris-virginica";
        }
    }
    else{
        if(count2>count3){
            prediction = "Iris-versicolor";
        }
        else{
            prediction = "Iris-virginica";
        }
    }
    cout<<"prediction Result "<<prediction<<"\n";
    // ---------------- Fully parallel (m*N threads) implementation ----------------
    hipEvent_t start1, stop1;
    hipEventCreate(&start1);
    hipEventCreate(&stop1);
    float milliseconds1 = 0;
    hipEventRecord(start1,0);
    int *id2d,*gid2d;
    int *mres = (int *)malloc(m*sizeof(int));
    float *gquery2d,*gdatam,*gdist,*dist;
    hipMalloc(&gquery2d,m*count*sizeof(float));
    hipMemcpy(gquery2d,query2d,m*count*sizeof(float),hipMemcpyHostToDevice);
    hipMalloc(&gdatam,N*count*sizeof(float));
    hipMemcpy(gdatam,data,N*count*sizeof(float),hipMemcpyHostToDevice);
    dist = (float *)malloc(m*N*sizeof(float));
    hipMalloc(&gdist,m*N*sizeof(float));
    // BUGFIX: maxkernal accumulates into gdist, but hipMalloc does not zero
    // memory — clear the buffer before the launch.
    hipMemset(gdist,0,m*N*sizeof(float));
    id2d = (int *)malloc(m*N*sizeof(int));
    hipMalloc(&gid2d,m*N*sizeof(int));
    // One thread per (test, train) pair.
    maxkernal<<<m,N>>>(gdatam,gquery2d,gdist,gid2d,N,count);
    hipMemcpy(dist,gdist,m*N*sizeof(float),hipMemcpyDeviceToHost);
    hipMemcpy(id2d,gid2d,m*N*sizeof(int),hipMemcpyDeviceToHost);
    hipEventRecord(stop1,0);
    hipEventSynchronize(stop1);
    hipEventElapsedTime(&milliseconds1, start1, stop1);
    for(int i=0;i<m;i++){
        float *distance = (float *)malloc(N*sizeof(float));
        int *index = (int *)malloc(N*sizeof(int));
        for(int j=0;j<N;j++){
            distance[j] = dist[i*N+j];
            index[j] = id2d[i*N+j];
        }
        thrust::sort_by_key(distance, distance + N, index);
        int count1,count2,count3;
        count1 = count2 = count3 = 0;
        for(int j=0;j<k;j++){
            int p = index[j]%N;    // map flat pair id back to training index
            if(s[p]=="Iris-setosa"){
                count1++;
            }
            if(s[p]=="Iris-versicolor"){
                count2++;
            }
            if(s[p]=="Iris-virginica"){
                count3++;
            }
        }
        if(count1>count2){
            if(count1>count3){
                mres[i] = 1;
            }
            else{
                mres[i] = 3;
            }
        }
        else{
            if(count2>count3){
                mres[i] = 2;
            }
            else{
                mres[i] = 3;
            }
        }
        // BUGFIX: these per-iteration scratch buffers were leaked.
        free(distance);
        free(index);
    }
    // ---------------- Accuracy of the parallel implementation ----------------
    int *ggclass,*gggsres,*ggcounter;
    int ccounter[1];
    ccounter[0] = 0;
    hipMalloc(&ggclass,m*sizeof(int));
    hipMalloc(&gggsres,m*sizeof(int));
    hipMalloc(&ggcounter,1*sizeof(int));
    hipMemcpy(ggclass,fclass,m*sizeof(int),hipMemcpyHostToDevice);
    hipMemcpy(gggsres,mres,m*sizeof(int),hipMemcpyHostToDevice);
    hipMemcpy(ggcounter,ccounter,1*sizeof(int),hipMemcpyHostToDevice);
    Accuracy<<<1,m>>>(ggclass,gggsres,ggcounter);
    hipMemcpy(ccounter,ggcounter,1*sizeof(int),hipMemcpyDeviceToHost);
    float aacc = ccounter[0]*100;
    aacc = aacc/m;
    printf("Time taken %f\n",milliseconds1);
    // BUGFIX: this line printed the first implementation's accuracy (acc);
    // it must report the parallel implementation's (aacc).
    cout<<"Accuracy of KNN after Max Parallelism "<<aacc<<"%"<<"\n";
    // Free GPU buffers.
    hipFree(ggclass);
    hipFree(gggsres);
    hipFree(ggcounter);
    hipFree(gquery2d);
    hipFree(gdatam);
    hipFree(gdis);
    hipFree(gdist);
    hipFree(gid);
    hipFree(gid2d);
    hipFree(gpoint);
    hipFree(gquery);
    hipFree(gdata);
    hipFree(gcounter);
    hipFree(gclass);
    // BUGFIX: the original called hipFree(gsres) — gsres is a host stack
    // array, not a device allocation — while leaking the ggsres device
    // buffer it actually allocated.
    hipFree(ggsres);
    hipFree(gres);
    hipFree(gidd);
    hipFree(ggdata);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipEventDestroy(start1);
    hipEventDestroy(stop1);
    // Free host buffers.
    free(data);
    free(fclass);
    free(res);
    free(id);
    free(query);
    free(query2d);
    free(points);
    free(idd);
    free(dis);
    free(id2d);
    free(mres);
    free(dist);
    hipDeviceSynchronize();
    return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | //-------------------------------------GPU Implementation of KNN--------------------------------------------------
//---------------------------Train Data store in input.txt and Test data in test.txt------------------------------
#include<iostream>
#include<thrust/host_vector.h>
#include<thrust/device_vector.h>
#include<stdlib.h>
#include<stdio.h>
#include<thrust/sort.h>
#include<math.h>
#include<hip/hip_runtime.h>
using namespace std;
// Calculating distance in parallel for one test point and all training point
// Kernal launched with 1*n threads
// Distance from one test point (gquery) to every training point (gdata).
// Launch layout: <<<1, N>>> — one thread per training point.
// gdata  : N x count row-major training matrix (device)
// gquery : count-element test point (device)
// gres   : out, N Euclidean distances (device)
// gid    : out, identity permutation 0..N-1, later sorted by distance (device)
// NOTE: the loop starts at i=1, skipping column 0 — preserved from the
// original implementation; presumably column 0 is not a feature (confirm
// against the input file format).
__global__ void k1(float *gdata,float *gquery,float *gres,int *gid,int N,int count) {
    int id = threadIdx.x;
    if (id >= N) return;               // guard against over-sized launches
    gid[id] = id;
    float dist = 0.0f;
    for (int i = 1; i < count; i++) {
        float diff = gdata[id*count+i] - gquery[i];
        dist += diff * diff;
    }
    // sqrtf keeps the computation in single precision; the original sqrt()
    // silently promoted to double.
    gres[id] = sqrtf(dist);
}
/*__global__ void k(float *data,int N,int count){
for(int j=0;j<count;j++){
printf("%d\n",data[threadIdx.x*count+j]);
}
}*/
//Calculating distances in parallel between all train point and test point .
//kernal launched with m*n threads
// Distances between every (test point, training point) pair.
// Launch layout: <<<m, N>>> — flat thread id encodes the pair:
//   i = id / N : test point index,  j = id % N : training point index.
// BUGFIX: the original accumulated with atomicAdd(&dis[id], ...) into
// memory that hipMalloc never zeroes, so results included garbage.  Each
// thread owns a unique dis[id], so the sum belongs in a register: this
// fixes the uninitialized-read bug and removes the needless atomics.
__global__ void maxkernal(float *data,float *query,float *dis,int *gid,int N,int count){
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    int i = id/N;                      // test point index
    int j = id%N;                      // training point index
    float sum = 0.0f;
    // Column 0 skipped, matching k1 (presumably not a feature — confirm).
    for(int k=1;k<count;k++){
        float diff = data[j*count+k]-query[i*count+k];
        sum += diff*diff;
    }
    gid[id] = id;                      // flat pair id; caller maps back with % N
    dis[id] = sqrtf(sum);              // sqrtf: stay in single precision
}
// Accuracy calculation in parallel
// Counts matching predictions in parallel: one thread per test sample.
// s1      : ground-truth class ids (device)
// s2      : predicted class ids (device)
// counter : single-element accumulator, incremented atomically per match.
__global__ void Accuracy(int *s1,int *s2,int *counter){
    int tid = threadIdx.x;
    bool match = (s1[tid] == s2[tid]);
    if (match) {
        atomicAdd(counter, 1);
    }
}
// Begin of the main function
int main(){
//Reading the train points
int k=15;
int N=135;
int count=0;
FILE *fp;
string s[N];
fp = fopen("input.txt","r");
char ch = ' ';
while(ch!='\n'){
ch = getc(fp);
if(ch==','){
count++;
}
}
float *data = (float *)malloc(N*count*sizeof(float));
for(int i=0;i<N;i++){
for(int j=0;j<count;j++){
fscanf(fp,"%f",&data[i*count+j]);
ch = fgetc(fp);
//cout<<data[i*count+j]<<"\t";
}
char c;
c = fgetc(fp);
while(c!='\n'){
s[i] += c;
c = fgetc(fp);
}
//cout<<s[i]<<"\n";
}
fclose(fp);
float *gdata,*gres,*res;
int *id,*gid;
int *fclass;
/*cudaMalloc(&gdata,N*count*sizeof(float));
cudaMemcpy(gdata,data,N*count*sizeof(float),cudaMemcpyHostToDevice);
k<<<1,N>>>(gdata,N,count);*/
//cout<<"----------------------------------------------------\n";
//Reading the test point
FILE *op;
int m=15;
string s1[m];
int gsres[m];
float *query,*gquery;
float *query2d = (float *)malloc(m*count*sizeof(float));
fclass = (int *)malloc(m*sizeof(int));
op = fopen("test.txt","r");
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float ms = 0;
for(int i=0;i<m;i++){
query = (float *)malloc(count*sizeof(float));
for(int j=0;j<count;j++){
fscanf(op,"%f",&query[j]);
query2d[i*count+j] = query[j];
ch = fgetc(op);
//cout<<query[i*count+j]<<"\t";
}
char c;
c = fgetc(op);
while(c!='\n'){
s1[i] += c;
c = fgetc(op);
}
if(s1[i]=="Iris-setosa"){
fclass[i] = 1;
//cout<<"c1";
}
if(s1[i]=="Iris-versicolor"){
fclass[i] = 2;
//cout<<"c2";
}
if(s1[i]=="Iris-virginica"){
fclass[i] = 3;
//cout<<"c3";
}
//cout<<s1[i]<<"\n";
float milliseconds = 0;
hipEventRecord(start,0);
hipMalloc(&gquery,count*sizeof(float));
hipMalloc(&gdata,N*count*sizeof(float));
hipMalloc(&gres,N*sizeof(float));
hipMalloc(&gid,N*sizeof(int));
res = (float *)malloc(N*sizeof(float));
id = (int *)malloc(N*sizeof(int));
hipMemcpy(gdata,data,N*count*sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(gquery,query,count*sizeof(float),hipMemcpyHostToDevice);
//Launching one test point to all train point kernal
k1<<<1,N>>>(gdata,gquery,gres,gid,N,count);
hipMemcpy(res,gres,N*sizeof(float),hipMemcpyDeviceToHost);
hipMemcpy(id,gid,N*sizeof(int),hipMemcpyDeviceToHost);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
ms += milliseconds;
thrust::sort_by_key(res, res + N, id);
int count1,count2,count3;
count1 = count2 = count3 = 0;
//voting process of K closest neighbour
for(int j=0;j<k;j++){
//cout<<i<<" "<<minKarr[j][0]<<" "<<minKarr[j][1]<<"\n";
if(s[id[j]]=="Iris-setosa"){
count1++;
}
if(s[id[j]]=="Iris-versicolor"){
count2++;
}
if(s[id[j]]=="Iris-virginica"){
count3++;
}
}
//cout<<count1<<" "<<count2<<" "<<count3<<"\n";
if(count1>count2){
if(count1>count3){
//count1
gsres[i] = 1;
}
else{
//count3
gsres[i] = 3;
}
}
else{
if(count2>count3){
//count2
gsres[i] = 2;
}
else{
//count3
gsres[i] = 3;
}
}
//cout<<gsres[i]<<"\n";
//cout<<"---------------------------------------------\n";
}
/*for(int i=0;i<m;i++){
printf("%d\n",fclass[i]);
}*/
int *gclass,*ggsres,*gcounter;
int counter[1];
counter[0] = 0;
hipMalloc(&gclass,m*sizeof(int));
hipMalloc(&ggsres,m*sizeof(int));
hipMalloc(&gcounter,1*sizeof(int));
hipMemcpy(gclass,fclass,m*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(ggsres,gsres,m*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(gcounter,counter,1*sizeof(int),hipMemcpyHostToDevice);
// Accuracy calculation
Accuracy<<<1,m>>>(gclass,ggsres,gcounter);
hipMemcpy(counter,gcounter,1*sizeof(int),hipMemcpyDeviceToHost);
//printf("%d\n",counter[0]);
float acc = counter[0]*100;
acc = acc/m;
printf("Basic KNN Time taken in %f millisecond\n",ms);
//cout<<"Time taken "<<elapsetime<<"\n";
cout<<"Accuracy of KNN "<<acc<<"%"<<"\n";
// prediction on random points
srand(time(0));
float *points = (float *)malloc(count*sizeof(float));
for(int j=0;j<count;j++){
if(j<count-1){
points[j] = rand()%8;
}
else{
points[j] = rand()%3;
}
}
/*for(int j=0;j<count;j++){
cout<<points[j]<<"\t";
}*/
cout<<"\n";
float *dis,*ggdata;
float *gpoint,*gdis;
int *gidd;
int *idd;
hipMalloc(&gpoint,count*sizeof(float));
hipMalloc(&ggdata,N*count*sizeof(float));
hipMalloc(&gdis,N*sizeof(float));
hipMalloc(&gidd,N*sizeof(int));
dis = (float *)malloc(N*sizeof(float));
idd = (int *)malloc(N*sizeof(int));
hipMemcpy(ggdata,data,N*count*sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(gpoint,points,count*sizeof(float),hipMemcpyHostToDevice);
//Launching one test point to all train point kernal
k1<<<1,N>>>(gdata,gpoint,gdis,gidd,N,count);
hipMemcpy(dis,gdis,N*sizeof(float),hipMemcpyDeviceToHost);
hipMemcpy(idd,gidd,N*sizeof(int),hipMemcpyDeviceToHost);
thrust::sort_by_key(dis, dis + N, idd);
int count1,count2,count3;
count1 = count2 = count3 = 0;
//voting process of K closest neighbour
for(int i=0;i<k;i++){
if(s[idd[i]]=="Iris-setosa"){
count1++;
}
if(s[idd[i]]=="Iris-versicolor"){
count2++;
}
if(s[idd[i]]=="Iris-virginica"){
count3++;
}
}
//Deciding on voting result
string prediction;
if(count1>count2){
if(count1>count3){
//count1
prediction = "Iris-setosa";
}
else{
//count3
prediction = "Iris-virginica";
}
}
else{
if(count2>count3){
//count2
prediction = "Iris-versicolor";
}
else{
//count3
prediction = "Iris-virginica";
}
}
cout<<"prediction Result "<<prediction<<"\n";
// More parallelism
/*for(int i=0;i<m;i++){
for(int j=0;j<count;j++){
cout<<query2d[i*count+j]<<"\t";
}
cout<<"\n";
}*/
//One more Knn implementation
hipEvent_t start1, stop1;
hipEventCreate(&start1);
hipEventCreate(&stop1);
float milliseconds1 = 0;
hipEventRecord(start1,0);
int *id2d,*gid2d;
int *mres = (int *)malloc(m*sizeof(int));
float *gquery2d,*gdatam,*gdist,*dist;
hipMalloc(&gquery2d,m*count*sizeof(float));
hipMemcpy(gquery2d,query2d,m*count*sizeof(float),hipMemcpyHostToDevice);
hipMalloc(&gdatam,N*count*sizeof(float));
hipMemcpy(gdatam,data,N*count*sizeof(float),hipMemcpyHostToDevice);
dist = (float *)malloc(m*N*sizeof(float));
hipMalloc(&gdist,m*N*sizeof(float));
id2d = (int *)malloc(m*N*sizeof(int));
hipMalloc(&gid2d,m*N*sizeof(int));
//Distance calculation of KNN through all train and all test points in parallel
//launching M*N threads
maxkernal<<<m,N>>>(gdatam,gquery2d,gdist,gid2d,N,count);
hipMemcpy(dist,gdist,m*N*sizeof(float),hipMemcpyDeviceToHost);
hipMemcpy(id2d,gid2d,m*N*sizeof(int),hipMemcpyDeviceToHost);
hipEventRecord(stop1,0);
hipEventSynchronize(stop1);
hipEventElapsedTime(&milliseconds1, start1, stop1);
for(int i=0;i<m;i++){
float *distance = (float *)malloc(N*sizeof(float));
int *index = (int *)malloc(N*sizeof(int));
for(int j=0;j<N;j++){
distance[j] = dist[i*N+j];
index[j] = id2d[i*N+j];
}
//Sorting the K nearest neighbour.
thrust::sort_by_key(distance, distance + N, index);
int count1,count2,count3;
//voting for K nearest neighbour
count1 = count2 = count3 = 0;
for(int j=0;j<k;j++){
int p = index[j]%N;
//cout<<i<<" "<<minKarr[j][0]<<" "<<minKarr[j][1]<<"\n";
if(s[p]=="Iris-setosa"){
count1++;
}
if(s[p]=="Iris-versicolor"){
count2++;
}
if(s[p]=="Iris-virginica"){
count3++;
}
}
//cout<<count1<<" "<<count2<<" "<<count3<<"\n";
if(count1>count2){
if(count1>count3){
//count1
mres[i] = 1;
}
else{
//count3
mres[i] = 3;
}
}
else{
if(count2>count3){
//count2
mres[i] = 2;
}
else{
//count3
mres[i] = 3;
}
}
//cout<<mres[i]<<"\n";
//cout<<"\n=========================================================================\n";
}
// Accuracy calculation.
int *ggclass,*gggsres,*ggcounter;
int ccounter[1];
ccounter[0] = 0;
hipMalloc(&ggclass,m*sizeof(int));
hipMalloc(&gggsres,m*sizeof(int));
hipMalloc(&ggcounter,1*sizeof(int));
hipMemcpy(ggclass,fclass,m*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(gggsres,mres,m*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(ggcounter,ccounter,1*sizeof(int),hipMemcpyHostToDevice);
Accuracy<<<1,m>>>(ggclass,gggsres,ggcounter);
hipMemcpy(ccounter,ggcounter,1*sizeof(int),hipMemcpyDeviceToHost);
//printf("%d\n",counter[0]);
float aacc = ccounter[0]*100;
aacc = aacc/m;
printf("Time taken %f\n",milliseconds1);
cout<<"Accuracy of KNN after Max Parallelism "<<acc<<"%"<<"\n";
//cout<<"---------------------------------------------\n";
//Free gpu variables
hipFree(ggclass);
hipFree(gggsres);
hipFree(ggcounter);
hipFree(gquery2d);
hipFree(gdatam);
hipFree(gdis);
hipFree(gdist);
hipFree(gid);
hipFree(gid2d);
hipFree(gpoint);
hipFree(gquery);
hipFree(gdata);
hipFree(gcounter);
hipFree(gclass);
hipFree(gsres);
hipFree(gres);
hipFree(gidd);
hipFree(ggdata);
//Free Cpu variables
free(data);
free(fclass);
free(res);
free(id);
free(query);
free(query2d);
free(points);
free(idd);
free(dis);
free(id2d);
free(mres);
free(dist);
//---------------------------++++++++++++++++++++++++----------------------------
hipDeviceSynchronize();
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z2k1PfS_S_Piii
.globl _Z2k1PfS_S_Piii
.p2align 8
.type _Z2k1PfS_S_Piii,@function
_Z2k1PfS_S_Piii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b64 s[4:5], s[0:1], 0x18
v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v1, 2, v0
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s2, 2
global_store_b32 v1, v0, s[4:5]
s_cbranch_scc1 .LBB0_3
s_load_b128 s[4:7], s[0:1], 0x0
v_mad_u64_u32 v[1:2], null, v0, s2, 1
v_mov_b32_e32 v2, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, s4, v3
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
s_add_i32 s4, s2, -1
s_add_u32 s2, s6, 4
s_addc_u32 s3, s7, 0
.LBB0_2:
global_load_b32 v1, v[3:4], off
s_load_b32 s5, s[2:3], 0x0
v_add_co_u32 v3, vcc_lo, v3, 4
s_add_i32 s4, s4, -1
v_add_co_ci_u32_e32 v4, vcc_lo, 0, v4, vcc_lo
s_add_u32 s2, s2, 4
s_addc_u32 s3, s3, 0
s_cmp_eq_u32 s4, 0
s_waitcnt vmcnt(0) lgkmcnt(0)
v_subrev_f32_e32 v1, s5, v1
s_delay_alu instid0(VALU_DEP_1)
v_fmac_f32_e32 v2, v1, v1
s_cbranch_scc0 .LBB0_2
.LBB0_3:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_cmp_gt_f32_e32 vcc_lo, 0xf800000, v2
v_mul_f32_e32 v1, 0x4f800000, v2
s_load_b64 s[0:1], s[0:1], 0x10
v_dual_cndmask_b32 v1, v2, v1 :: v_dual_lshlrev_b32 v0, 2, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2)
v_sqrt_f32_e32 v2, v1
s_waitcnt_depctr 0xfff
v_add_nc_u32_e32 v4, 1, v2
v_add_nc_u32_e32 v3, -1, v2
v_fma_f32 v6, -v4, v2, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v5, -v3, v2, v1
v_cmp_ge_f32_e64 s2, 0, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
v_cndmask_b32_e64 v2, v2, v3, s2
v_cmp_lt_f32_e64 s2, 0, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e64 v2, v2, v4, s2
v_mul_f32_e32 v3, 0x37800000, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cndmask_b32_e32 v2, v2, v3, vcc_lo
v_cmp_class_f32_e64 vcc_lo, v1, 0x260
v_cndmask_b32_e32 v1, v2, v1, vcc_lo
s_waitcnt lgkmcnt(0)
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z2k1PfS_S_Piii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 40
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 8
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z2k1PfS_S_Piii, .Lfunc_end0-_Z2k1PfS_S_Piii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z9maxkernalPfS_S_Piii
.globl _Z9maxkernalPfS_S_Piii
.p2align 8
.type _Z9maxkernalPfS_S_Piii,@function
_Z9maxkernalPfS_S_Piii:
s_clause 0x2
s_load_b32 s4, s[0:1], 0x34
s_load_b32 s8, s[0:1], 0x24
s_load_b64 s[2:3], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_cmp_lt_i32 s8, 2
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_ashrrev_i32_e32 v2, 31, v1
s_cbranch_scc1 .LBB1_5
s_load_b32 s4, s[0:1], 0x20
v_ashrrev_i32_e32 v4, 31, v1
s_mov_b32 s9, 1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v5, v1, v4
v_xor_b32_e32 v5, v5, v4
s_waitcnt lgkmcnt(0)
s_ashr_i32 s5, s4, 31
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
s_add_i32 s6, s4, s5
v_xor_b32_e32 v4, s5, v4
s_xor_b32 s6, s6, s5
v_cvt_f32_u32_e32 v0, s6
s_sub_i32 s7, 0, s6
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v0, v0
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v0, 0x4f7ffffe, v0
v_cvt_u32_f32_e32 v0, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v3, s7, v0
v_mul_hi_u32 v3, v0, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v0, v0, v3
v_mul_hi_u32 v0, v5, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v3, v0, s6
v_sub_nc_u32_e32 v3, v5, v3
v_add_nc_u32_e32 v5, 1, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v6, s6, v3
v_cmp_le_u32_e32 vcc_lo, s6, v3
v_dual_cndmask_b32 v3, v3, v6 :: v_dual_cndmask_b32 v0, v0, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_le_u32_e32 vcc_lo, s6, v3
v_add_nc_u32_e32 v5, 1, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v0, v0, v5, vcc_lo
v_xor_b32_e32 v0, v0, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v0, v0, v4
v_mul_lo_u32 v3, v0, s4
s_load_b128 s[4:7], s[0:1], 0x0
v_mul_lo_u32 v0, v0, s8
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_sub_nc_u32_e32 v5, v1, v3
v_lshlrev_b64 v[3:4], 2, v[1:2]
v_mul_lo_u32 v7, v5, s8
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v3, vcc_lo, s2, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v4, vcc_lo
s_set_inst_prefetch_distance 0x1
.p2align 6
.LBB1_2:
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v5, s9, v7
v_add_nc_u32_e32 v8, s9, v0
s_mov_b32 s10, 0
v_ashrrev_i32_e32 v6, 31, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v9, 31, v8
v_lshlrev_b64 v[5:6], 2, v[5:6]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[8:9], 2, v[8:9]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v5, vcc_lo, s4, v5
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_ci_u32_e32 v6, vcc_lo, s5, v6, vcc_lo
v_add_co_u32 v8, vcc_lo, s6, v8
s_delay_alu instid0(VALU_DEP_4)
v_add_co_ci_u32_e32 v9, vcc_lo, s7, v9, vcc_lo
global_load_b32 v5, v[5:6], off
global_load_b32 v8, v[8:9], off
global_load_b32 v6, v[3:4], off
s_waitcnt vmcnt(1)
v_sub_f32_e32 v5, v5, v8
s_delay_alu instid0(VALU_DEP_1)
v_mul_f32_e32 v8, v5, v5
.LBB1_3:
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1)
v_add_f32_e32 v5, v6, v8
global_atomic_cmpswap_b32 v5, v[3:4], v[5:6], off glc
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, v5, v6
v_mov_b32_e32 v6, v5
s_or_b32 s10, vcc_lo, s10
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s10
s_cbranch_execnz .LBB1_3
s_or_b32 exec_lo, exec_lo, s10
s_add_i32 s9, s9, 1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_eq_u32 s9, s8
s_cbranch_scc0 .LBB1_2
.LBB1_5:
s_set_inst_prefetch_distance 0x2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_add_co_u32 v4, vcc_lo, s2, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v3, vcc_lo
s_load_b64 s[2:3], s[0:1], 0x18
global_load_b32 v0, v[4:5], off
s_waitcnt vmcnt(0)
v_mul_f32_e32 v6, 0x4f800000, v0
v_cmp_gt_f32_e32 vcc_lo, 0xf800000, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v0, v0, v6, vcc_lo
v_sqrt_f32_e32 v6, v0
s_waitcnt_depctr 0xfff
v_add_nc_u32_e32 v7, -1, v6
v_add_nc_u32_e32 v8, 1, v6
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_fma_f32 v9, -v7, v6, v0
v_fma_f32 v10, -v8, v6, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_ge_f32_e64 s0, 0, v9
v_cndmask_b32_e64 v6, v6, v7, s0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_lt_f32_e64 s0, 0, v10
v_cndmask_b32_e64 v6, v6, v8, s0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v7, 0x37800000, v6
v_cndmask_b32_e32 v6, v6, v7, vcc_lo
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s2, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
v_cmp_class_f32_e64 vcc_lo, v0, 0x260
v_cndmask_b32_e32 v0, v6, v0, vcc_lo
global_store_b32 v[2:3], v1, off
global_store_b32 v[4:5], v0, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9maxkernalPfS_S_Piii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 11
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z9maxkernalPfS_S_Piii, .Lfunc_end1-_Z9maxkernalPfS_S_Piii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z8AccuracyPiS_S_
.globl _Z8AccuracyPiS_S_
.p2align 8
.type _Z8AccuracyPiS_S_,@function
_Z8AccuracyPiS_S_:
s_load_b128 s[4:7], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 2, v0
s_mov_b32 s2, exec_lo
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b32 v1, v0, s[4:5]
global_load_b32 v0, v0, s[6:7]
s_waitcnt vmcnt(0)
v_cmpx_eq_u32_e64 v1, v0
s_cbranch_execz .LBB2_3
s_mov_b32 s2, exec_lo
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mbcnt_lo_u32_b32 v0, s2, 0
v_cmp_eq_u32_e32 vcc_lo, 0, v0
s_and_b32 s3, exec_lo, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_mov_b32 exec_lo, s3
s_cbranch_execz .LBB2_3
s_load_b64 s[0:1], s[0:1], 0x10
s_bcnt1_i32_b32 s2, s2
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
s_waitcnt lgkmcnt(0)
global_atomic_add_u32 v0, v1, s[0:1]
.LBB2_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8AccuracyPiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 8
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z8AccuracyPiS_S_, .Lfunc_end2-_Z8AccuracyPiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 40
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z2k1PfS_S_Piii
.private_segment_fixed_size: 0
.sgpr_count: 10
.sgpr_spill_count: 0
.symbol: _Z2k1PfS_S_Piii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9maxkernalPfS_S_Piii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9maxkernalPfS_S_Piii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 11
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8AccuracyPiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 10
.sgpr_spill_count: 0
.symbol: _Z8AccuracyPiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
#define LEN_F 3073
#define TILE_WIDTH 32
// 3073/32 = 97.
// One-block SGD step for a 10-class linear classifier trained with a
// multiclass hinge (SVM-style) loss on flattened inputs of LEN_F floats.
// Expected launch: a single block of (10, 32, 1) threads — tx indexes the
// class/column (0..9), ty indexes rows of a TILE_WIDTH-wide tile.
//   x          : training data, total_examples rows of LEN_F floats each
//   y          : ground-truth class per example (float, cast to int)
//   weights    : (LEN_F, 10) weight matrix, row-major, updated in place
//   single_dw  : (LEN_F, 10) gradient accumulator, updated in place
//   dot_XW     : (10,) scratch buffer for the current example's scores
//   loss       : 1-element output, regularized average hinge loss
__global__ void sgd(float *x, float* y, float* weights,
float *single_dw, /* dw computed by one data point, with size (3073, 10) */
float reg_strength,
float learning_rate,
int total_examples,
float *dot_XW,
float *loss) /* dot_XW is with size (10, 1) */
{
/* blockDim.x = 10, blockDim.y = 32 */
int tx = threadIdx.x; //10
int ty = threadIdx.y; //32
float tmp_w, tmp_dw;
int yi, t, data_point;
// Shared tiles: a TILE_WIDTH x 10 slab of W and TILE_WIDTH entries of x.
__shared__ float weights_shared[TILE_WIDTH][10];
__shared__ float x_shared[TILE_WIDTH];
// Per-class hinge indicator (sign of dL/dscore for each column).
__shared__ float ds[10];
// NOTE(review): sum_ds is never initialized or reset anywhere in this
// kernel, so the first atomicAdd below folds into an indeterminate value
// and it keeps accumulating across data points — confirm intended behavior.
__shared__ float sum_ds;
__shared__ float distance[10]; // margin: score_j - score_yi + 1
__shared__ float loss_i[10];   // per-class hinge contribution
__shared__ float W_square;     // ||W||^2 accumulator for the L2 penalty
__shared__ float sum_loss;     // hinge loss accumulated over all examples
float W_square_single = 0;
// NOTE(review): sum_value is not reset at the top of each data_point
// iteration, so scores accumulate across examples — verify this is intended.
float sum_value=0;
// 2D block, (10, 32, 1)
if (tx == 0 && ty ==0) {
sum_loss = 0;
W_square = 0;
}
for(data_point =0; data_point < total_examples; data_point++) {
// Tiled dot product: accumulate dot(x[data_point], W[:, tx]).
for (t = 0; t < (LEN_F-1)/TILE_WIDTH + 1; t++) {
if ((t * TILE_WIDTH + ty) < LEN_F)
weights_shared[ty][tx] = weights[(t * TILE_WIDTH + ty)* 10 + tx];
else
weights_shared[ty][tx] =0;
if( (t*TILE_WIDTH+ty) < LEN_F)
x_shared[ty] = x[data_point * LEN_F + t *TILE_WIDTH + ty];
else
x_shared[ty]=0;
__syncthreads();
// All 32 ty-threads of a given tx read the same shared data, so they
// compute identical partial sums.
// NOTE(review): there is no __syncthreads() after this loop, so the
// next tile iteration can overwrite the shared buffers while slower
// warps are still reading them — looks like a race; verify.
for(int k=0 ; k < TILE_WIDTH; k++)
sum_value+= x_shared[k] * weights_shared[k][tx];
}//end--of--tile
// tx is the indexing of column {0, 1, 2, ..., 9}
// (32 threads per column store the same value here.)
dot_XW[tx] = sum_value;
__syncthreads();
// dot_XW should finish updating by all threads
if(ty==0) {
yi = (int) y[data_point]; //6  (true class of this example)
distance[tx] = dot_XW[tx] - dot_XW[yi] + 1;
}
__syncthreads();
if(ty==0) {
// ds[tx] = 1 for every class whose margin is violated.
if (distance[tx] > 0) {
ds[tx] = 1;
} else {
ds[tx] = 0;
}
// All 10 tx-threads write the same slot with the same value here.
ds[yi] = 0;
atomicAdd(&sum_ds, ds[tx]);
}
__syncthreads();
// calculating loss by accumulating 200 data point.
if(ty==0) {
if (distance[tx] > 0) {
loss_i[tx] = distance[tx];
} else {
loss_i[tx] = 0;
}
loss_i[yi] = 0;
atomicAdd(&sum_loss, loss_i[tx]); //loss_i is (10, 1)
// __syncthreads();
}
if(ty==0) {
// Gradient for the true class is minus the violated-margin count.
// NOTE(review): no barrier separates the atomicAdds above from this
// read of sum_ds within the same ty==0 phase — confirm ordering.
ds[yi] = -1 * sum_ds;
for(int ii=0 ; ii< LEN_F ; ii++) {
int idx = ii * 10 + tx;
// NOTE(review): x is indexed from 0 rather than from
// data_point * LEN_F — presumably it should read the current
// example's features; verify against a reference implementation.
single_dw[idx] += x[ii] * ds[tx];
}
}
__syncthreads(); // wait for all 10 threads to finish the single_dw matrix.
}//End--of--Data-Point
// Final weight update: each of the 10 ty==0 threads owns one column
// (idx = ii*10 + tx is disjoint per tx).
if(ty==0) {
for(int ii=0 ; ii< LEN_F ; ii++) {
int idx = ii* 10 + tx;
tmp_w = weights[idx];
// Average accumulated gradient plus the L2 regularization term.
tmp_dw = single_dw[idx]/total_examples + 2 * reg_strength * tmp_w;
W_square_single += tmp_w * tmp_w; // calculate for one column of W, for computing loss
weights[idx] = tmp_w - learning_rate * tmp_dw;
}
// 10 threads add to W_square;
atomicAdd(&W_square, W_square_single);
// __syncthreads();
if (tx == 0) { // only one thread is calculating the loss
// NOTE(review): W_square may not yet include all 10 atomicAdds —
// there is no barrier between the add above and this read; verify.
loss[0] = sum_loss/total_examples + reg_strength * W_square;
}
}//end--of--ty
__syncthreads();
}//End--of--global | .file "tmpxft_000d3682_00000000-6_sgd_cifar_single_blk_normal_mult.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z35__device_stub__Z3sgdPfS_S_S_ffiS_S_PfS_S_S_ffiS_S_
.type _Z35__device_stub__Z3sgdPfS_S_S_ffiS_S_PfS_S_S_ffiS_S_, @function
_Z35__device_stub__Z3sgdPfS_S_S_ffiS_S_PfS_S_S_ffiS_S_:
.LFB2081:
.cfi_startproc
endbr64
subq $216, %rsp
.cfi_def_cfa_offset 224
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
movq %rdx, 40(%rsp)
movq %rcx, 32(%rsp)
movss %xmm0, 28(%rsp)
movss %xmm1, 24(%rsp)
movl %r8d, 20(%rsp)
movq %r9, 8(%rsp)
movq 224(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 200(%rsp)
xorl %eax, %eax
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rax
movq %rax, 144(%rsp)
leaq 32(%rsp), %rax
movq %rax, 152(%rsp)
leaq 28(%rsp), %rax
movq %rax, 160(%rsp)
leaq 24(%rsp), %rax
movq %rax, 168(%rsp)
leaq 20(%rsp), %rax
movq %rax, 176(%rsp)
leaq 8(%rsp), %rax
movq %rax, 184(%rsp)
movq %rsp, %rax
movq %rax, 192(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 200(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $216, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 232
pushq 72(%rsp)
.cfi_def_cfa_offset 240
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z3sgdPfS_S_S_ffiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 224
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2081:
.size _Z35__device_stub__Z3sgdPfS_S_S_ffiS_S_PfS_S_S_ffiS_S_, .-_Z35__device_stub__Z3sgdPfS_S_S_ffiS_S_PfS_S_S_ffiS_S_
.globl _Z3sgdPfS_S_S_ffiS_S_
.type _Z3sgdPfS_S_S_ffiS_S_, @function
_Z3sgdPfS_S_S_ffiS_S_:
.LFB2082:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z35__device_stub__Z3sgdPfS_S_S_ffiS_S_PfS_S_S_ffiS_S_
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2082:
.size _Z3sgdPfS_S_S_ffiS_S_, .-_Z3sgdPfS_S_S_ffiS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z3sgdPfS_S_S_ffiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z3sgdPfS_S_S_ffiS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#define LEN_F 3073
#define TILE_WIDTH 32
// 3073/32 = 97.
// One-block SGD step for a 10-class linear classifier trained with a
// multiclass hinge (SVM-style) loss on flattened inputs of LEN_F floats.
// Expected launch: a single block of (10, 32, 1) threads — tx indexes the
// class/column (0..9), ty indexes rows of a TILE_WIDTH-wide tile.
//   x          : training data, total_examples rows of LEN_F floats each
//   y          : ground-truth class per example (float, cast to int)
//   weights    : (LEN_F, 10) weight matrix, row-major, updated in place
//   single_dw  : (LEN_F, 10) gradient accumulator, updated in place
//   dot_XW     : (10,) scratch buffer for the current example's scores
//   loss       : 1-element output, regularized average hinge loss
__global__ void sgd(float *x, float* y, float* weights,
float *single_dw, /* dw computed by one data point, with size (3073, 10) */
float reg_strength,
float learning_rate,
int total_examples,
float *dot_XW,
float *loss) /* dot_XW is with size (10, 1) */
{
/* blockDim.x = 10, blockDim.y = 32 */
int tx = threadIdx.x; //10
int ty = threadIdx.y; //32
float tmp_w, tmp_dw;
int yi, t, data_point;
// Shared tiles: a TILE_WIDTH x 10 slab of W and TILE_WIDTH entries of x.
__shared__ float weights_shared[TILE_WIDTH][10];
__shared__ float x_shared[TILE_WIDTH];
// Per-class hinge indicator (sign of dL/dscore for each column).
__shared__ float ds[10];
// NOTE(review): sum_ds is never initialized or reset anywhere in this
// kernel, so the first atomicAdd below folds into an indeterminate value
// and it keeps accumulating across data points — confirm intended behavior.
__shared__ float sum_ds;
__shared__ float distance[10]; // margin: score_j - score_yi + 1
__shared__ float loss_i[10];   // per-class hinge contribution
__shared__ float W_square;     // ||W||^2 accumulator for the L2 penalty
__shared__ float sum_loss;     // hinge loss accumulated over all examples
float W_square_single = 0;
// NOTE(review): sum_value is not reset at the top of each data_point
// iteration, so scores accumulate across examples — verify this is intended.
float sum_value=0;
// 2D block, (10, 32, 1)
if (tx == 0 && ty ==0) {
sum_loss = 0;
W_square = 0;
}
for(data_point =0; data_point < total_examples; data_point++) {
// Tiled dot product: accumulate dot(x[data_point], W[:, tx]).
for (t = 0; t < (LEN_F-1)/TILE_WIDTH + 1; t++) {
if ((t * TILE_WIDTH + ty) < LEN_F)
weights_shared[ty][tx] = weights[(t * TILE_WIDTH + ty)* 10 + tx];
else
weights_shared[ty][tx] =0;
if( (t*TILE_WIDTH+ty) < LEN_F)
x_shared[ty] = x[data_point * LEN_F + t *TILE_WIDTH + ty];
else
x_shared[ty]=0;
__syncthreads();
// All 32 ty-threads of a given tx read the same shared data, so they
// compute identical partial sums.
// NOTE(review): there is no __syncthreads() after this loop, so the
// next tile iteration can overwrite the shared buffers while slower
// warps are still reading them — looks like a race; verify.
for(int k=0 ; k < TILE_WIDTH; k++)
sum_value+= x_shared[k] * weights_shared[k][tx];
}//end--of--tile
// tx is the indexing of column {0, 1, 2, ..., 9}
// (32 threads per column store the same value here.)
dot_XW[tx] = sum_value;
__syncthreads();
// dot_XW should finish updating by all threads
if(ty==0) {
yi = (int) y[data_point]; //6  (true class of this example)
distance[tx] = dot_XW[tx] - dot_XW[yi] + 1;
}
__syncthreads();
if(ty==0) {
// ds[tx] = 1 for every class whose margin is violated.
if (distance[tx] > 0) {
ds[tx] = 1;
} else {
ds[tx] = 0;
}
// All 10 tx-threads write the same slot with the same value here.
ds[yi] = 0;
atomicAdd(&sum_ds, ds[tx]);
}
__syncthreads();
// calculating loss by accumulating 200 data point.
if(ty==0) {
if (distance[tx] > 0) {
loss_i[tx] = distance[tx];
} else {
loss_i[tx] = 0;
}
loss_i[yi] = 0;
atomicAdd(&sum_loss, loss_i[tx]); //loss_i is (10, 1)
// __syncthreads();
}
if(ty==0) {
// Gradient for the true class is minus the violated-margin count.
// NOTE(review): no barrier separates the atomicAdds above from this
// read of sum_ds within the same ty==0 phase — confirm ordering.
ds[yi] = -1 * sum_ds;
for(int ii=0 ; ii< LEN_F ; ii++) {
int idx = ii * 10 + tx;
// NOTE(review): x is indexed from 0 rather than from
// data_point * LEN_F — presumably it should read the current
// example's features; verify against a reference implementation.
single_dw[idx] += x[ii] * ds[tx];
}
}
__syncthreads(); // wait for all 10 threads to finish the single_dw matrix.
}//End--of--Data-Point
// Final weight update: each of the 10 ty==0 threads owns one column
// (idx = ii*10 + tx is disjoint per tx).
if(ty==0) {
for(int ii=0 ; ii< LEN_F ; ii++) {
int idx = ii* 10 + tx;
tmp_w = weights[idx];
// Average accumulated gradient plus the L2 regularization term.
tmp_dw = single_dw[idx]/total_examples + 2 * reg_strength * tmp_w;
W_square_single += tmp_w * tmp_w; // calculate for one column of W, for computing loss
weights[idx] = tmp_w - learning_rate * tmp_dw;
}
// 10 threads add to W_square;
atomicAdd(&W_square, W_square_single);
// __syncthreads();
if (tx == 0) { // only one thread is calculating the loss
// NOTE(review): W_square may not yet include all 10 atomicAdds —
// there is no barrier between the add above and this read; verify.
loss[0] = sum_loss/total_examples + reg_strength * W_square;
}
}//end--of--ty
__syncthreads();
}//End--of--global | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define LEN_F 3073
#define TILE_WIDTH 32
// 3073/32 = 97.
__global__ void sgd(float *x, float* y, float* weights,
float *single_dw, /* dw computed by one data point, with size (3073, 10) */
float reg_strength,
float learning_rate,
int total_examples,
float *dot_XW,
float *loss) /* dot_XW is with size (10, 1) */
{
/* blockDim.x = 10, blockDim.y = 32 */
int tx = threadIdx.x; //10
int ty = threadIdx.y; //32
float tmp_w, tmp_dw;
int yi, t, data_point;
__shared__ float weights_shared[TILE_WIDTH][10];
__shared__ float x_shared[TILE_WIDTH];
__shared__ float ds[10];
__shared__ float sum_ds;
__shared__ float distance[10];
__shared__ float loss_i[10];
__shared__ float W_square;
__shared__ float sum_loss;
float W_square_single = 0;
float sum_value=0;
// 2D block, (10, 32, 1)
if (tx == 0 && ty ==0) {
sum_loss = 0;
W_square = 0;
}
for(data_point =0; data_point < total_examples; data_point++) {
for (t = 0; t < (LEN_F-1)/TILE_WIDTH + 1; t++) {
if ((t * TILE_WIDTH + ty) < LEN_F)
weights_shared[ty][tx] = weights[(t * TILE_WIDTH + ty)* 10 + tx];
else
weights_shared[ty][tx] =0;
if( (t*TILE_WIDTH+ty) < LEN_F)
x_shared[ty] = x[data_point * LEN_F + t *TILE_WIDTH + ty];
else
x_shared[ty]=0;
__syncthreads();
for(int k=0 ; k < TILE_WIDTH; k++)
sum_value+= x_shared[k] * weights_shared[k][tx];
}//end--of--tile
// tx is the indexing of column {0, 1, 2, ..., 9}
dot_XW[tx] = sum_value;
__syncthreads();
// dot_XW should finish updating by all threads
if(ty==0) {
yi = (int) y[data_point]; //6
distance[tx] = dot_XW[tx] - dot_XW[yi] + 1;
}
__syncthreads();
if(ty==0) {
if (distance[tx] > 0) {
ds[tx] = 1;
} else {
ds[tx] = 0;
}
ds[yi] = 0;
atomicAdd(&sum_ds, ds[tx]);
}
__syncthreads();
// calculating loss by accumulating 200 data point.
if(ty==0) {
if (distance[tx] > 0) {
loss_i[tx] = distance[tx];
} else {
loss_i[tx] = 0;
}
loss_i[yi] = 0;
atomicAdd(&sum_loss, loss_i[tx]); //loss_i is (10, 1)
// __syncthreads();
}
if(ty==0) {
ds[yi] = -1 * sum_ds;
for(int ii=0 ; ii< LEN_F ; ii++) {
int idx = ii * 10 + tx;
single_dw[idx] += x[ii] * ds[tx];
}
}
__syncthreads(); // wait for all 10 threads to finish the single_dw matrix.
}//End--of--Data-Point
if(ty==0) {
for(int ii=0 ; ii< LEN_F ; ii++) {
int idx = ii* 10 + tx;
tmp_w = weights[idx];
tmp_dw = single_dw[idx]/total_examples + 2 * reg_strength * tmp_w;
W_square_single += tmp_w * tmp_w; // calculate for one column of W, for computing loss
weights[idx] = tmp_w - learning_rate * tmp_dw;
}
// 10 threads add to W_square;
atomicAdd(&W_square, W_square_single);
// __syncthreads();
if (tx == 0) { // only one thread is calculating the loss
loss[0] = sum_loss/total_examples + reg_strength * W_square;
}
}//end--of--ty
__syncthreads();
}//End--of--global |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define LEN_F 3073
#define TILE_WIDTH 32
// 3073/32 = 97.
__global__ void sgd(float *x, float* y, float* weights,
float *single_dw, /* dw computed by one data point, with size (3073, 10) */
float reg_strength,
float learning_rate,
int total_examples,
float *dot_XW,
float *loss) /* dot_XW is with size (10, 1) */
{
/* blockDim.x = 10, blockDim.y = 32 */
int tx = threadIdx.x; //10
int ty = threadIdx.y; //32
float tmp_w, tmp_dw;
int yi, t, data_point;
__shared__ float weights_shared[TILE_WIDTH][10];
__shared__ float x_shared[TILE_WIDTH];
__shared__ float ds[10];
__shared__ float sum_ds;
__shared__ float distance[10];
__shared__ float loss_i[10];
__shared__ float W_square;
__shared__ float sum_loss;
float W_square_single = 0;
float sum_value=0;
// 2D block, (10, 32, 1)
if (tx == 0 && ty ==0) {
sum_loss = 0;
W_square = 0;
}
for(data_point =0; data_point < total_examples; data_point++) {
for (t = 0; t < (LEN_F-1)/TILE_WIDTH + 1; t++) {
if ((t * TILE_WIDTH + ty) < LEN_F)
weights_shared[ty][tx] = weights[(t * TILE_WIDTH + ty)* 10 + tx];
else
weights_shared[ty][tx] =0;
if( (t*TILE_WIDTH+ty) < LEN_F)
x_shared[ty] = x[data_point * LEN_F + t *TILE_WIDTH + ty];
else
x_shared[ty]=0;
__syncthreads();
for(int k=0 ; k < TILE_WIDTH; k++)
sum_value+= x_shared[k] * weights_shared[k][tx];
}//end--of--tile
// tx is the indexing of column {0, 1, 2, ..., 9}
dot_XW[tx] = sum_value;
__syncthreads();
// dot_XW should finish updating by all threads
if(ty==0) {
yi = (int) y[data_point]; //6
distance[tx] = dot_XW[tx] - dot_XW[yi] + 1;
}
__syncthreads();
if(ty==0) {
if (distance[tx] > 0) {
ds[tx] = 1;
} else {
ds[tx] = 0;
}
ds[yi] = 0;
atomicAdd(&sum_ds, ds[tx]);
}
__syncthreads();
// calculating loss by accumulating 200 data point.
if(ty==0) {
if (distance[tx] > 0) {
loss_i[tx] = distance[tx];
} else {
loss_i[tx] = 0;
}
loss_i[yi] = 0;
atomicAdd(&sum_loss, loss_i[tx]); //loss_i is (10, 1)
// __syncthreads();
}
if(ty==0) {
ds[yi] = -1 * sum_ds;
for(int ii=0 ; ii< LEN_F ; ii++) {
int idx = ii * 10 + tx;
single_dw[idx] += x[ii] * ds[tx];
}
}
__syncthreads(); // wait for all 10 threads to finish the single_dw matrix.
}//End--of--Data-Point
if(ty==0) {
for(int ii=0 ; ii< LEN_F ; ii++) {
int idx = ii* 10 + tx;
tmp_w = weights[idx];
tmp_dw = single_dw[idx]/total_examples + 2 * reg_strength * tmp_w;
W_square_single += tmp_w * tmp_w; // calculate for one column of W, for computing loss
weights[idx] = tmp_w - learning_rate * tmp_dw;
}
// 10 threads add to W_square;
atomicAdd(&W_square, W_square_single);
// __syncthreads();
if (tx == 0) { // only one thread is calculating the loss
loss[0] = sum_loss/total_examples + reg_strength * W_square;
}
}//end--of--ty
__syncthreads();
}//End--of--global | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3sgdPfS_S_S_ffiS_S_
.globl _Z3sgdPfS_S_S_ffiS_S_
.p2align 8
.type _Z3sgdPfS_S_S_ffiS_S_,@function
_Z3sgdPfS_S_S_ffiS_S_:
v_and_b32_e32 v1, 0x3ff, v0
v_bfe_u32 v0, v0, 10, 10
s_mov_b32 s2, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_or_b32_e32 v2, v1, v0
v_cmpx_eq_u32_e32 0, v2
s_cbranch_execz .LBB0_2
v_mov_b32_e32 v2, 0
s_movk_i32 s3, 0x400
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_add_nc_u32_e32 v3, s3, v2
ds_store_2addr_b32 v3, v2, v2 offset0:107 offset1:118
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s2
s_clause 0x1
s_load_b32 s18, s[0:1], 0x28
s_load_b128 s[4:7], s[0:1], 0x10
v_cmp_eq_u32_e32 vcc_lo, 0, v0
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s18, 1
s_cbranch_scc1 .LBB0_39
s_clause 0x1
s_load_b64 s[12:13], s[0:1], 0x30
s_load_b128 s[8:11], s[0:1], 0x0
v_dual_mov_b32 v18, 0 :: v_dual_lshlrev_b32 v11, 2, v1
v_lshl_add_u32 v12, v0, 2, 0x500
v_mov_b32_e32 v3, 0
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_dual_mov_b32 v13, 1.0 :: v_dual_add_nc_u32 v16, 0x580, v11
v_add_co_u32 v4, s2, s6, v11
v_add_co_ci_u32_e64 v5, null, s7, 0, s2
v_mad_u32_u24 v14, v0, 40, v11
v_add_nc_u32_e32 v15, 0x5e0, v11
v_add_nc_u32_e32 v17, 0x5b0, v11
s_mov_b32 s15, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
s_mov_b32 s14, s15
s_waitcnt lgkmcnt(0)
v_add_co_u32 v6, s2, s12, v11
v_add_co_ci_u32_e64 v7, null, s13, 0, s2
s_branch .LBB0_5
.LBB0_4:
s_or_b32 exec_lo, exec_lo, s3
s_add_i32 s14, s14, 1
s_waitcnt_vscnt null, 0x0
s_cmp_lg_u32 s14, s18
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB0_39
.LBB0_5:
v_mad_u64_u32 v[9:10], null, s14, 0xc01, v[0:1]
s_mov_b32 s16, 0
.LBB0_6:
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_lshl_b32 s17, s16, 5
v_add_nc_u32_e32 v2, s17, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_lt_u32_e64 s2, 0xc00, v2
s_and_saveexec_b32 s3, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_xor_b32 s3, exec_lo, s3
s_cbranch_execz .LBB0_8
ds_store_b32 v14, v3
.LBB0_8:
s_and_not1_saveexec_b32 s19, s3
s_cbranch_execz .LBB0_10
v_mad_u64_u32 v[19:20], null, v2, 10, v[1:2]
v_mov_b32_e32 v20, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[19:20], 2, v[19:20]
v_add_co_u32 v19, s3, s4, v19
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v20, s3, s5, v20, s3
global_load_b32 v2, v[19:20], off
s_waitcnt vmcnt(0)
ds_store_b32 v14, v2
.LBB0_10:
s_or_b32 exec_lo, exec_lo, s19
s_and_saveexec_b32 s3, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_xor_b32 s2, exec_lo, s3
s_cbranch_execz .LBB0_12
ds_store_b32 v12, v3
.LBB0_12:
s_and_not1_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_14
v_add_nc_u32_e32 v2, s17, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[19:20], 2, v[2:3]
v_add_co_u32 v19, s2, s8, v19
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v20, s2, s9, v20, s2
global_load_b32 v2, v[19:20], off
s_waitcnt vmcnt(0)
ds_store_b32 v12, v2
.LBB0_14:
s_or_b32 exec_lo, exec_lo, s3
v_mov_b32_e32 v2, v11
s_movk_i32 s2, 0x500
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
.LBB0_15:
v_mov_b32_e32 v10, s2
s_add_i32 s2, s2, 4
ds_load_b32 v10, v10
ds_load_b32 v19, v2
v_add_nc_u32_e32 v2, 40, v2
s_cmpk_eq_i32 s2, 0x580
s_waitcnt lgkmcnt(0)
v_fmac_f32_e32 v18, v10, v19
s_cbranch_scc0 .LBB0_15
s_add_i32 s16, s16, 1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmpk_eq_i32 s16, 0x61
s_cbranch_scc0 .LBB0_6
global_store_b32 v[6:7], v18, off
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_and_saveexec_b32 s3, vcc_lo
s_cbranch_execz .LBB0_19
s_lshl_b64 s[16:17], s[14:15], 2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
s_add_u32 s16, s10, s16
s_addc_u32 s17, s11, s17
global_load_b32 v2, v3, s[16:17]
s_waitcnt vmcnt(0)
v_cvt_i32_f32_e32 v8, v2
v_ashrrev_i32_e32 v9, 31, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[9:10], 2, v[8:9]
v_add_co_u32 v9, s2, s12, v9
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v10, s2, s13, v10, s2
s_clause 0x1
global_load_b32 v2, v[6:7], off
global_load_b32 v9, v[9:10], off
s_waitcnt vmcnt(0)
v_sub_f32_e32 v2, v2, v9
s_delay_alu instid0(VALU_DEP_1)
v_add_f32_e32 v2, 1.0, v2
ds_store_b32 v15, v2
.LBB0_19:
s_or_b32 exec_lo, exec_lo, s3
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_and_saveexec_b32 s3, vcc_lo
s_cbranch_execz .LBB0_28
ds_load_b32 v2, v15
s_waitcnt lgkmcnt(0)
v_cmp_nlt_f32_e64 s2, 0, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_saveexec_b32 s16, s2
s_xor_b32 s2, exec_lo, s16
s_cbranch_execz .LBB0_22
ds_store_b32 v16, v3
.LBB0_22:
s_and_not1_saveexec_b32 s2, s2
s_cbranch_execz .LBB0_24
ds_store_b32 v16, v13
.LBB0_24:
s_or_b32 exec_lo, exec_lo, s2
v_lshlrev_b32_e32 v2, 2, v8
s_mov_b32 s2, exec_lo
ds_store_b32 v2, v3 offset:1408
ds_load_b32 v9, v16
v_bfrev_b32_e32 v2, 1
.LBB0_25:
s_ctz_i32_b32 s16, s2
s_waitcnt lgkmcnt(0)
v_readlane_b32 s17, v9, s16
s_lshl_b32 s16, 1, s16
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_not1_b32 s2, s2, s16
s_cmp_lg_u32 s2, 0
s_delay_alu instid0(VALU_DEP_1)
v_add_f32_e32 v2, s17, v2
s_cbranch_scc1 .LBB0_25
v_mbcnt_lo_u32_b32 v9, exec_lo, 0
s_mov_b32 s16, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_eq_u32_e32 0, v9
s_xor_b32 s16, exec_lo, s16
s_cbranch_execz .LBB0_28
ds_add_f32 v3, v2 offset:1448
.LBB0_28:
s_or_b32 exec_lo, exec_lo, s3
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_and_saveexec_b32 s3, vcc_lo
s_cbranch_execz .LBB0_4
ds_load_b32 v2, v15
s_waitcnt lgkmcnt(0)
v_cmp_nlt_f32_e64 s2, 0, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_saveexec_b32 s16, s2
s_xor_b32 s2, exec_lo, s16
s_cbranch_execz .LBB0_31
ds_store_b32 v17, v3
.LBB0_31:
s_and_not1_saveexec_b32 s2, s2
s_cbranch_execz .LBB0_33
ds_store_b32 v17, v2
.LBB0_33:
s_or_b32 exec_lo, exec_lo, s2
v_lshlrev_b32_e32 v2, 2, v8
v_bfrev_b32_e32 v9, 1
s_mov_b32 s2, exec_lo
ds_store_b32 v2, v3 offset:1456
ds_load_b32 v10, v17
.LBB0_34:
s_ctz_i32_b32 s16, s2
s_waitcnt lgkmcnt(0)
v_readlane_b32 s17, v10, s16
s_lshl_b32 s16, 1, s16
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_not1_b32 s2, s2, s16
s_cmp_lg_u32 s2, 0
s_delay_alu instid0(VALU_DEP_1)
v_add_f32_e32 v9, s17, v9
s_cbranch_scc1 .LBB0_34
v_mbcnt_lo_u32_b32 v10, exec_lo, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_eq_u32_e64 s2, 0, v10
s_and_saveexec_b32 s16, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_xor_b32 s2, exec_lo, s16
s_cbranch_execz .LBB0_37
ds_add_f32 v3, v9 offset:1496
.LBB0_37:
s_or_b32 exec_lo, exec_lo, s2
ds_load_b32 v9, v3 offset:1448
s_mov_b64 s[16:17], 0
s_waitcnt lgkmcnt(0)
v_xor_b32_e32 v9, 0x80000000, v9
ds_store_b32 v2, v9 offset:1408
ds_load_b32 v2, v16
v_dual_mov_b32 v10, v5 :: v_dual_mov_b32 v9, v4
.p2align 6
.LBB0_38:
s_add_u32 s20, s8, s16
s_addc_u32 s21, s9, s17
global_load_b32 v19, v[9:10], off
global_load_b32 v20, v3, s[20:21]
s_add_u32 s16, s16, 4
s_addc_u32 s17, s17, 0
s_cmpk_lg_i32 s16, 0x3004
s_waitcnt vmcnt(0) lgkmcnt(0)
v_fmac_f32_e32 v19, v20, v2
global_store_b32 v[9:10], v19, off
v_add_co_u32 v9, s2, v9, 40
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v10, s2, 0, v10, s2
s_cbranch_scc1 .LBB0_38
s_branch .LBB0_4
.LBB0_39:
s_and_saveexec_b32 s8, vcc_lo
s_cbranch_execz .LBB0_48
s_load_b64 s[2:3], s[0:1], 0x20
v_lshlrev_b32_e32 v6, 2, v1
v_cvt_f32_i32_e32 v0, s18
v_mov_b32_e32 v2, 0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v3, s4, s4, v6
v_add_co_ci_u32_e64 v4, null, s5, 0, s4
v_add_co_u32 v6, s4, s6, v6
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v7, null, s7, 0, s4
s_mov_b64 s[4:5], 0
s_waitcnt lgkmcnt(0)
v_add_f32_e64 v5, s2, s2
s_set_inst_prefetch_distance 0x1
.p2align 6
.LBB0_41:
v_add_co_u32 v8, vcc_lo, v6, s4
v_add_co_ci_u32_e32 v9, vcc_lo, s5, v7, vcc_lo
global_load_b32 v10, v[8:9], off
v_add_co_u32 v8, vcc_lo, v3, s4
v_add_co_ci_u32_e32 v9, vcc_lo, s5, v4, vcc_lo
s_add_u32 s4, s4, 40
s_addc_u32 s5, s5, 0
s_cmp_eq_u32 s4, 0x1e028
global_load_b32 v11, v[8:9], off
s_waitcnt vmcnt(1)
v_div_scale_f32 v12, null, v0, v0, v10
v_div_scale_f32 v15, vcc_lo, v10, v0, v10
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_rcp_f32_e32 v13, v12
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v2, v11, v11
s_waitcnt_depctr 0xfff
v_fma_f32 v14, -v12, v13, 1.0
v_fmac_f32_e32 v13, v14, v13
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v14, v15, v13
v_fma_f32 v16, -v12, v14, v15
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v14, v16, v13
v_fma_f32 v12, -v12, v14, v15
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fmas_f32 v12, v12, v13, v14
v_div_fixup_f32 v10, v12, v0, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v10, v5, v11
v_fma_f32 v10, -v10, s3, v11
global_store_b32 v[8:9], v10, off
s_cbranch_scc0 .LBB0_41
s_set_inst_prefetch_distance 0x2
v_bfrev_b32_e32 v3, 1
s_mov_b32 s3, exec_lo
.LBB0_43:
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_ctz_i32_b32 s4, s3
v_readlane_b32 s5, v2, s4
s_lshl_b32 s4, 1, s4
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_not1_b32 s3, s3, s4
s_cmp_lg_u32 s3, 0
s_delay_alu instid0(VALU_DEP_1)
v_add_f32_e32 v3, s5, v3
s_cbranch_scc1 .LBB0_43
v_mbcnt_lo_u32_b32 v2, exec_lo, 0
s_mov_b32 s3, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_eq_u32_e32 0, v2
s_xor_b32 s3, exec_lo, s3
s_cbranch_execz .LBB0_46
v_mov_b32_e32 v2, 0
ds_add_f32 v2, v3 offset:1452
.LBB0_46:
s_or_b32 exec_lo, exec_lo, s3
v_cmp_eq_u32_e32 vcc_lo, 0, v1
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_48
v_mov_b32_e32 v3, 0
s_movk_i32 s3, 0x400
s_load_b64 s[0:1], s[0:1], 0x38
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v1, s3, v3
ds_load_2addr_b32 v[1:2], v1 offset0:107 offset1:118
s_waitcnt lgkmcnt(0)
v_div_scale_f32 v4, null, v0, v0, v2
v_rcp_f32_e32 v5, v4
s_waitcnt_depctr 0xfff
v_fma_f32 v6, -v4, v5, 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v5, v6, v5
v_div_scale_f32 v6, vcc_lo, v2, v0, v2
v_mul_f32_e32 v7, v6, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v8, -v4, v7, v6
v_fmac_f32_e32 v7, v8, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v4, -v4, v7, v6
v_div_fmas_f32 v4, v4, v5, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fixup_f32 v0, v4, v0, v2
v_fmac_f32_e32 v0, s2, v1
global_store_b32 v3, v0, s[0:1]
.LBB0_48:
s_or_b32 exec_lo, exec_lo, s8
s_waitcnt lgkmcnt(0)
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3sgdPfS_S_S_ffiS_S_
.amdhsa_group_segment_fixed_size 1544
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 64
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 21
.amdhsa_next_free_sgpr 22
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3sgdPfS_S_S_ffiS_S_, .Lfunc_end0-_Z3sgdPfS_S_S_ffiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 48
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 56
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 1544
.kernarg_segment_align: 8
.kernarg_segment_size: 64
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3sgdPfS_S_S_ffiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 24
.sgpr_spill_count: 0
.symbol: _Z3sgdPfS_S_S_ffiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 21
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define LEN_F 3073
#define TILE_WIDTH 32
// 3073/32 = 97.
__global__ void sgd(float *x, float* y, float* weights,
float *single_dw, /* dw computed by one data point, with size (3073, 10) */
float reg_strength,
float learning_rate,
int total_examples,
float *dot_XW,
float *loss) /* dot_XW is with size (10, 1) */
{
/* blockDim.x = 10, blockDim.y = 32 */
int tx = threadIdx.x; //10
int ty = threadIdx.y; //32
float tmp_w, tmp_dw;
int yi, t, data_point;
__shared__ float weights_shared[TILE_WIDTH][10];
__shared__ float x_shared[TILE_WIDTH];
__shared__ float ds[10];
__shared__ float sum_ds;
__shared__ float distance[10];
__shared__ float loss_i[10];
__shared__ float W_square;
__shared__ float sum_loss;
float W_square_single = 0;
float sum_value=0;
// 2D block, (10, 32, 1)
if (tx == 0 && ty ==0) {
sum_loss = 0;
W_square = 0;
}
for(data_point =0; data_point < total_examples; data_point++) {
for (t = 0; t < (LEN_F-1)/TILE_WIDTH + 1; t++) {
if ((t * TILE_WIDTH + ty) < LEN_F)
weights_shared[ty][tx] = weights[(t * TILE_WIDTH + ty)* 10 + tx];
else
weights_shared[ty][tx] =0;
if( (t*TILE_WIDTH+ty) < LEN_F)
x_shared[ty] = x[data_point * LEN_F + t *TILE_WIDTH + ty];
else
x_shared[ty]=0;
__syncthreads();
for(int k=0 ; k < TILE_WIDTH; k++)
sum_value+= x_shared[k] * weights_shared[k][tx];
}//end--of--tile
// tx is the indexing of column {0, 1, 2, ..., 9}
dot_XW[tx] = sum_value;
__syncthreads();
// dot_XW should finish updating by all threads
if(ty==0) {
yi = (int) y[data_point]; //6
distance[tx] = dot_XW[tx] - dot_XW[yi] + 1;
}
__syncthreads();
if(ty==0) {
if (distance[tx] > 0) {
ds[tx] = 1;
} else {
ds[tx] = 0;
}
ds[yi] = 0;
atomicAdd(&sum_ds, ds[tx]);
}
__syncthreads();
// calculating loss by accumulating 200 data point.
if(ty==0) {
if (distance[tx] > 0) {
loss_i[tx] = distance[tx];
} else {
loss_i[tx] = 0;
}
loss_i[yi] = 0;
atomicAdd(&sum_loss, loss_i[tx]); //loss_i is (10, 1)
// __syncthreads();
}
if(ty==0) {
ds[yi] = -1 * sum_ds;
for(int ii=0 ; ii< LEN_F ; ii++) {
int idx = ii * 10 + tx;
single_dw[idx] += x[ii] * ds[tx];
}
}
__syncthreads(); // wait for all 10 threads to finish the single_dw matrix.
}//End--of--Data-Point
if(ty==0) {
for(int ii=0 ; ii< LEN_F ; ii++) {
int idx = ii* 10 + tx;
tmp_w = weights[idx];
tmp_dw = single_dw[idx]/total_examples + 2 * reg_strength * tmp_w;
W_square_single += tmp_w * tmp_w; // calculate for one column of W, for computing loss
weights[idx] = tmp_w - learning_rate * tmp_dw;
}
// 10 threads add to W_square;
atomicAdd(&W_square, W_square_single);
// __syncthreads();
if (tx == 0) { // only one thread is calculating the loss
loss[0] = sum_loss/total_examples + reg_strength * W_square;
}
}//end--of--ty
__syncthreads();
}//End--of--global | .text
.file "sgd_cifar_single_blk_normal_mult.hip"
.globl _Z18__device_stub__sgdPfS_S_S_ffiS_S_ # -- Begin function _Z18__device_stub__sgdPfS_S_S_ffiS_S_
.p2align 4, 0x90
.type _Z18__device_stub__sgdPfS_S_S_ffiS_S_,@function
_Z18__device_stub__sgdPfS_S_S_ffiS_S_: # @_Z18__device_stub__sgdPfS_S_S_ffiS_S_
.cfi_startproc
# %bb.0:
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 104(%rsp)
movq %rsi, 96(%rsp)
movq %rdx, 88(%rsp)
movq %rcx, 80(%rsp)
movss %xmm0, 20(%rsp)
movss %xmm1, 16(%rsp)
movl %r8d, 12(%rsp)
movq %r9, 72(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 80(%rsp), %rax
movq %rax, 136(%rsp)
leaq 20(%rsp), %rax
movq %rax, 144(%rsp)
leaq 16(%rsp), %rax
movq %rax, 152(%rsp)
leaq 12(%rsp), %rax
movq %rax, 160(%rsp)
leaq 72(%rsp), %rax
movq %rax, 168(%rsp)
leaq 192(%rsp), %rax
movq %rax, 176(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z3sgdPfS_S_S_ffiS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $200, %rsp
.cfi_adjust_cfa_offset -200
retq
.Lfunc_end0:
.size _Z18__device_stub__sgdPfS_S_S_ffiS_S_, .Lfunc_end0-_Z18__device_stub__sgdPfS_S_S_ffiS_S_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3sgdPfS_S_S_ffiS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3sgdPfS_S_S_ffiS_S_,@object # @_Z3sgdPfS_S_S_ffiS_S_
.section .rodata,"a",@progbits
.globl _Z3sgdPfS_S_S_ffiS_S_
.p2align 3, 0x0
_Z3sgdPfS_S_S_ffiS_S_:
.quad _Z18__device_stub__sgdPfS_S_S_ffiS_S_
.size _Z3sgdPfS_S_S_ffiS_S_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z3sgdPfS_S_S_ffiS_S_"
.size .L__unnamed_1, 22
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__sgdPfS_S_S_ffiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3sgdPfS_S_S_ffiS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000d3682_00000000-6_sgd_cifar_single_blk_normal_mult.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z35__device_stub__Z3sgdPfS_S_S_ffiS_S_PfS_S_S_ffiS_S_
.type _Z35__device_stub__Z3sgdPfS_S_S_ffiS_S_PfS_S_S_ffiS_S_, @function
_Z35__device_stub__Z3sgdPfS_S_S_ffiS_S_PfS_S_S_ffiS_S_:
.LFB2081:
.cfi_startproc
endbr64
subq $216, %rsp
.cfi_def_cfa_offset 224
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
movq %rdx, 40(%rsp)
movq %rcx, 32(%rsp)
movss %xmm0, 28(%rsp)
movss %xmm1, 24(%rsp)
movl %r8d, 20(%rsp)
movq %r9, 8(%rsp)
movq 224(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 200(%rsp)
xorl %eax, %eax
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rax
movq %rax, 144(%rsp)
leaq 32(%rsp), %rax
movq %rax, 152(%rsp)
leaq 28(%rsp), %rax
movq %rax, 160(%rsp)
leaq 24(%rsp), %rax
movq %rax, 168(%rsp)
leaq 20(%rsp), %rax
movq %rax, 176(%rsp)
leaq 8(%rsp), %rax
movq %rax, 184(%rsp)
movq %rsp, %rax
movq %rax, 192(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 200(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $216, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 232
pushq 72(%rsp)
.cfi_def_cfa_offset 240
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z3sgdPfS_S_S_ffiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 224
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2081:
.size _Z35__device_stub__Z3sgdPfS_S_S_ffiS_S_PfS_S_S_ffiS_S_, .-_Z35__device_stub__Z3sgdPfS_S_S_ffiS_S_PfS_S_S_ffiS_S_
.globl _Z3sgdPfS_S_S_ffiS_S_
.type _Z3sgdPfS_S_S_ffiS_S_, @function
_Z3sgdPfS_S_S_ffiS_S_:
.LFB2082:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z35__device_stub__Z3sgdPfS_S_S_ffiS_S_PfS_S_S_ffiS_S_
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2082:
.size _Z3sgdPfS_S_S_ffiS_S_, .-_Z3sgdPfS_S_S_ffiS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z3sgdPfS_S_S_ffiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z3sgdPfS_S_S_ffiS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "sgd_cifar_single_blk_normal_mult.hip"
.globl _Z18__device_stub__sgdPfS_S_S_ffiS_S_ # -- Begin function _Z18__device_stub__sgdPfS_S_S_ffiS_S_
.p2align 4, 0x90
.type _Z18__device_stub__sgdPfS_S_S_ffiS_S_,@function
_Z18__device_stub__sgdPfS_S_S_ffiS_S_: # @_Z18__device_stub__sgdPfS_S_S_ffiS_S_
.cfi_startproc
# %bb.0:
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 104(%rsp)
movq %rsi, 96(%rsp)
movq %rdx, 88(%rsp)
movq %rcx, 80(%rsp)
movss %xmm0, 20(%rsp)
movss %xmm1, 16(%rsp)
movl %r8d, 12(%rsp)
movq %r9, 72(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 80(%rsp), %rax
movq %rax, 136(%rsp)
leaq 20(%rsp), %rax
movq %rax, 144(%rsp)
leaq 16(%rsp), %rax
movq %rax, 152(%rsp)
leaq 12(%rsp), %rax
movq %rax, 160(%rsp)
leaq 72(%rsp), %rax
movq %rax, 168(%rsp)
leaq 192(%rsp), %rax
movq %rax, 176(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z3sgdPfS_S_S_ffiS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $200, %rsp
.cfi_adjust_cfa_offset -200
retq
.Lfunc_end0:
.size _Z18__device_stub__sgdPfS_S_S_ffiS_S_, .Lfunc_end0-_Z18__device_stub__sgdPfS_S_S_ffiS_S_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3sgdPfS_S_S_ffiS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3sgdPfS_S_S_ffiS_S_,@object # @_Z3sgdPfS_S_S_ffiS_S_
.section .rodata,"a",@progbits
.globl _Z3sgdPfS_S_S_ffiS_S_
.p2align 3, 0x0
_Z3sgdPfS_S_S_ffiS_S_:
.quad _Z18__device_stub__sgdPfS_S_S_ffiS_S_
.size _Z3sgdPfS_S_S_ffiS_S_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z3sgdPfS_S_S_ffiS_S_"
.size .L__unnamed_1, 22
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__sgdPfS_S_S_ffiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3sgdPfS_S_S_ffiS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | __global__ void FlexFDM1D_naive(float* U, float* Ux, int N, int alpha, float* stencils)
//
// Naive version where only global memory and automatic variables are accessed.
//
{
// YOUR TASKS:
// - Write body of kernel for computing Finite Difference Approksimations for
// threads in the grid.
// - Arbitrary sizes of N should be allowed (N can be larger than total threads).
// Insert code below this line.
}
//
// Kernel v2
//
__global__ void FlexFDM1D_v2(float* U, float* Ux, int N, int alpha, float* stencils)
//
// Improved version where shared memory is used to reduce global memory accesses.
//
{
// YOUR TASKS:
// - Write body of kernel for computing Finite Difference Approksimations for
// threads in the grid.
// - Arbitrary sizes of N should be allowed (N can be larger than total threads).
// - Utilize shared memory
// Insert code below this line.
}
//
// Kernel v3
//
__global__ void FlexFDM1D_v3(float* U, float* Ux, int N, int alpha, float* stencils)
//
// Improved version where shared memory is used to reduce global memory accesses.
//
{
// YOUR TASKS:
// - Write body of kernel for computing Finite Difference Approksimations for
// threads in the grid.
// - Arbitrary sizes of N should be allowed (N can be larger than total threads).
// - Utilize shared memory
// - Utilize constant memory for stencils coefficients
// Insert code below this line.
} | code for sm_80
Function : _Z12FlexFDM1D_v3PfS_iiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z12FlexFDM1D_v2PfS_iiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z15FlexFDM1D_naivePfS_iiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | __global__ void FlexFDM1D_naive(float* U, float* Ux, int N, int alpha, float* stencils)
//
// Naive version where only global memory and automatic variables are accessed.
//
{
// YOUR TASKS:
// - Write body of kernel for computing Finite Difference Approksimations for
// threads in the grid.
// - Arbitrary sizes of N should be allowed (N can be larger than total threads).
// Insert code below this line.
}
//
// Kernel v2
//
__global__ void FlexFDM1D_v2(float* U, float* Ux, int N, int alpha, float* stencils)
//
// Improved version where shared memory is used to reduce global memory accesses.
//
{
// YOUR TASKS:
// - Write body of kernel for computing Finite Difference Approksimations for
// threads in the grid.
// - Arbitrary sizes of N should be allowed (N can be larger than total threads).
// - Utilize shared memory
// Insert code below this line.
}
//
// Kernel v3
//
__global__ void FlexFDM1D_v3(float* U, float* Ux, int N, int alpha, float* stencils)
//
// Improved version where shared memory is used to reduce global memory accesses.
//
{
// YOUR TASKS:
// - Write body of kernel for computing Finite Difference Approksimations for
// threads in the grid.
// - Arbitrary sizes of N should be allowed (N can be larger than total threads).
// - Utilize shared memory
// - Utilize constant memory for stencils coefficients
// Insert code below this line.
} | .file "tmpxft_000805ed_00000000-6_FlexFDM1D_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z41__device_stub__Z15FlexFDM1D_naivePfS_iiS_PfS_iiS_
.type _Z41__device_stub__Z15FlexFDM1D_naivePfS_iiS_PfS_iiS_, @function
_Z41__device_stub__Z15FlexFDM1D_naivePfS_iiS_PfS_iiS_:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %r8, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z15FlexFDM1D_naivePfS_iiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z41__device_stub__Z15FlexFDM1D_naivePfS_iiS_PfS_iiS_, .-_Z41__device_stub__Z15FlexFDM1D_naivePfS_iiS_PfS_iiS_
.globl _Z15FlexFDM1D_naivePfS_iiS_
.type _Z15FlexFDM1D_naivePfS_iiS_, @function
_Z15FlexFDM1D_naivePfS_iiS_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z41__device_stub__Z15FlexFDM1D_naivePfS_iiS_PfS_iiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z15FlexFDM1D_naivePfS_iiS_, .-_Z15FlexFDM1D_naivePfS_iiS_
.globl _Z38__device_stub__Z12FlexFDM1D_v2PfS_iiS_PfS_iiS_
.type _Z38__device_stub__Z12FlexFDM1D_v2PfS_iiS_PfS_iiS_, @function
_Z38__device_stub__Z12FlexFDM1D_v2PfS_iiS_PfS_iiS_:
.LFB2053:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %r8, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z12FlexFDM1D_v2PfS_iiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2053:
.size _Z38__device_stub__Z12FlexFDM1D_v2PfS_iiS_PfS_iiS_, .-_Z38__device_stub__Z12FlexFDM1D_v2PfS_iiS_PfS_iiS_
.globl _Z12FlexFDM1D_v2PfS_iiS_
.type _Z12FlexFDM1D_v2PfS_iiS_, @function
_Z12FlexFDM1D_v2PfS_iiS_:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z12FlexFDM1D_v2PfS_iiS_PfS_iiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _Z12FlexFDM1D_v2PfS_iiS_, .-_Z12FlexFDM1D_v2PfS_iiS_
.globl _Z38__device_stub__Z12FlexFDM1D_v3PfS_iiS_PfS_iiS_
.type _Z38__device_stub__Z12FlexFDM1D_v3PfS_iiS_PfS_iiS_, @function
_Z38__device_stub__Z12FlexFDM1D_v3PfS_iiS_PfS_iiS_:
.LFB2055:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %r8, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z12FlexFDM1D_v3PfS_iiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2055:
.size _Z38__device_stub__Z12FlexFDM1D_v3PfS_iiS_PfS_iiS_, .-_Z38__device_stub__Z12FlexFDM1D_v3PfS_iiS_PfS_iiS_
.globl _Z12FlexFDM1D_v3PfS_iiS_
.type _Z12FlexFDM1D_v3PfS_iiS_, @function
_Z12FlexFDM1D_v3PfS_iiS_:
.LFB2056:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z12FlexFDM1D_v3PfS_iiS_PfS_iiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2056:
.size _Z12FlexFDM1D_v3PfS_iiS_, .-_Z12FlexFDM1D_v3PfS_iiS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z12FlexFDM1D_v3PfS_iiS_"
.LC1:
.string "_Z12FlexFDM1D_v2PfS_iiS_"
.LC2:
.string "_Z15FlexFDM1D_naivePfS_iiS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2058:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z12FlexFDM1D_v3PfS_iiS_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z12FlexFDM1D_v2PfS_iiS_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z15FlexFDM1D_naivePfS_iiS_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | __global__ void FlexFDM1D_naive(float* U, float* Ux, int N, int alpha, float* stencils)
//
// Naive version where only global memory and automatic variables are accessed.
//
{
// YOUR TASKS:
// - Write body of kernel for computing Finite Difference Approksimations for
// threads in the grid.
// - Arbitrary sizes of N should be allowed (N can be larger than total threads).
// Insert code below this line.
}
//
// Kernel v2
//
__global__ void FlexFDM1D_v2(float* U, float* Ux, int N, int alpha, float* stencils)
//
// Improved version where shared memory is used to reduce global memory accesses.
//
{
// YOUR TASKS:
// - Write body of kernel for computing Finite Difference Approksimations for
// threads in the grid.
// - Arbitrary sizes of N should be allowed (N can be larger than total threads).
// - Utilize shared memory
// Insert code below this line.
}
//
// Kernel v3
//
__global__ void FlexFDM1D_v3(float* U, float* Ux, int N, int alpha, float* stencils)
//
// Improved version where shared memory is used to reduce global memory accesses.
//
{
// YOUR TASKS:
// - Write body of kernel for computing Finite Difference Approksimations for
// threads in the grid.
// - Arbitrary sizes of N should be allowed (N can be larger than total threads).
// - Utilize shared memory
// - Utilize constant memory for stencils coefficients
// Insert code below this line.
} | #include <hip/hip_runtime.h>
__global__ void FlexFDM1D_naive(float* U, float* Ux, int N, int alpha, float* stencils)
//
// Naive version where only global memory and automatic variables are accessed.
//
{
// YOUR TASKS:
// - Write body of kernel for computing Finite Difference Approksimations for
// threads in the grid.
// - Arbitrary sizes of N should be allowed (N can be larger than total threads).
// Insert code below this line.
}
//
// Kernel v2
//
__global__ void FlexFDM1D_v2(float* U, float* Ux, int N, int alpha, float* stencils)
//
// Improved version where shared memory is used to reduce global memory accesses.
//
{
// YOUR TASKS:
// - Write body of kernel for computing Finite Difference Approksimations for
// threads in the grid.
// - Arbitrary sizes of N should be allowed (N can be larger than total threads).
// - Utilize shared memory
// Insert code below this line.
}
//
// Kernel v3
//
__global__ void FlexFDM1D_v3(float* U, float* Ux, int N, int alpha, float* stencils)
//
// Improved version where shared memory is used to reduce global memory accesses.
//
{
// YOUR TASKS:
// - Write body of kernel for computing Finite Difference Approksimations for
// threads in the grid.
// - Arbitrary sizes of N should be allowed (N can be larger than total threads).
// - Utilize shared memory
// - Utilize constant memory for stencils coefficients
// Insert code below this line.
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
__global__ void FlexFDM1D_naive(float* U, float* Ux, int N, int alpha, float* stencils)
//
// Naive version where only global memory and automatic variables are accessed.
//
{
// YOUR TASKS:
// - Write body of kernel for computing Finite Difference Approksimations for
// threads in the grid.
// - Arbitrary sizes of N should be allowed (N can be larger than total threads).
// Insert code below this line.
}
//
// Kernel v2
//
__global__ void FlexFDM1D_v2(float* U, float* Ux, int N, int alpha, float* stencils)
//
// Improved version where shared memory is used to reduce global memory accesses.
//
{
// YOUR TASKS:
// - Write body of kernel for computing Finite Difference Approksimations for
// threads in the grid.
// - Arbitrary sizes of N should be allowed (N can be larger than total threads).
// - Utilize shared memory
// Insert code below this line.
}
//
// Kernel v3
//
__global__ void FlexFDM1D_v3(float* U, float* Ux, int N, int alpha, float* stencils)
//
// Improved version where shared memory is used to reduce global memory accesses.
//
{
// YOUR TASKS:
// - Write body of kernel for computing Finite Difference Approksimations for
// threads in the grid.
// - Arbitrary sizes of N should be allowed (N can be larger than total threads).
// - Utilize shared memory
// - Utilize constant memory for stencils coefficients
// Insert code below this line.
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z15FlexFDM1D_naivePfS_iiS_
.globl _Z15FlexFDM1D_naivePfS_iiS_
.p2align 8
.type _Z15FlexFDM1D_naivePfS_iiS_,@function
_Z15FlexFDM1D_naivePfS_iiS_:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z15FlexFDM1D_naivePfS_iiS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 32
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z15FlexFDM1D_naivePfS_iiS_, .Lfunc_end0-_Z15FlexFDM1D_naivePfS_iiS_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z12FlexFDM1D_v2PfS_iiS_
.globl _Z12FlexFDM1D_v2PfS_iiS_
.p2align 8
.type _Z12FlexFDM1D_v2PfS_iiS_,@function
_Z12FlexFDM1D_v2PfS_iiS_:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12FlexFDM1D_v2PfS_iiS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 32
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z12FlexFDM1D_v2PfS_iiS_, .Lfunc_end1-_Z12FlexFDM1D_v2PfS_iiS_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z12FlexFDM1D_v3PfS_iiS_
.globl _Z12FlexFDM1D_v3PfS_iiS_
.p2align 8
.type _Z12FlexFDM1D_v3PfS_iiS_,@function
_Z12FlexFDM1D_v3PfS_iiS_:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12FlexFDM1D_v3PfS_iiS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 32
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z12FlexFDM1D_v3PfS_iiS_, .Lfunc_end2-_Z12FlexFDM1D_v3PfS_iiS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 32
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z15FlexFDM1D_naivePfS_iiS_
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z15FlexFDM1D_naivePfS_iiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 32
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12FlexFDM1D_v2PfS_iiS_
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z12FlexFDM1D_v2PfS_iiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 32
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12FlexFDM1D_v3PfS_iiS_
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z12FlexFDM1D_v3PfS_iiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
__global__ void FlexFDM1D_naive(float* U, float* Ux, int N, int alpha, float* stencils)
//
// Naive version where only global memory and automatic variables are accessed.
//
{
// YOUR TASKS:
// - Write body of kernel for computing Finite Difference Approksimations for
// threads in the grid.
// - Arbitrary sizes of N should be allowed (N can be larger than total threads).
// Insert code below this line.
}
//
// Kernel v2
//
__global__ void FlexFDM1D_v2(float* U, float* Ux, int N, int alpha, float* stencils)
//
// Improved version where shared memory is used to reduce global memory accesses.
//
{
// YOUR TASKS:
// - Write body of kernel for computing Finite Difference Approksimations for
// threads in the grid.
// - Arbitrary sizes of N should be allowed (N can be larger than total threads).
// - Utilize shared memory
// Insert code below this line.
}
//
// Kernel v3
//
// Same contract as FlexFDM1D_v2, additionally intended to keep the stencil
// coefficients in constant memory (body left as an exercise — see the task
// list inside).
//
// Parameters (assumptions to confirm when implementing):
//   U        - input function values on the 1-D grid (presumably length N)
//   Ux       - output buffer for the approximated derivative — TODO confirm
//   N        - number of grid points; may exceed the total thread count
//   alpha    - presumably the stencil half-width — verify
//   stencils - stencil coefficients; v3 should copy these into a
//              __constant__ array on the host side (cudaMemcpyToSymbol)
__global__ void FlexFDM1D_v3(float* U, float* Ux, int N, int alpha, float* stencils)
//
// Improved version where shared memory is used to reduce global memory accesses.
//
{
    // YOUR TASKS:
    // - Write body of kernel for computing Finite Difference Approximations for
    //   threads in the grid.
    // - Arbitrary sizes of N should be allowed (N can be larger than total threads).
    // - Utilize shared memory
    // - Utilize constant memory for stencils coefficients
    // Insert code below this line.
} | .text
.file "FlexFDM1D_kernel.hip"
.globl _Z30__device_stub__FlexFDM1D_naivePfS_iiS_ # -- Begin function _Z30__device_stub__FlexFDM1D_naivePfS_iiS_
.p2align 4, 0x90
.type _Z30__device_stub__FlexFDM1D_naivePfS_iiS_,@function
_Z30__device_stub__FlexFDM1D_naivePfS_iiS_: # @_Z30__device_stub__FlexFDM1D_naivePfS_iiS_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 4(%rsp)
movl %ecx, (%rsp)
movq %r8, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 4(%rsp), %rax
movq %rax, 96(%rsp)
movq %rsp, %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z15FlexFDM1D_naivePfS_iiS_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z30__device_stub__FlexFDM1D_naivePfS_iiS_, .Lfunc_end0-_Z30__device_stub__FlexFDM1D_naivePfS_iiS_
.cfi_endproc
# -- End function
.globl _Z27__device_stub__FlexFDM1D_v2PfS_iiS_ # -- Begin function _Z27__device_stub__FlexFDM1D_v2PfS_iiS_
.p2align 4, 0x90
.type _Z27__device_stub__FlexFDM1D_v2PfS_iiS_,@function
_Z27__device_stub__FlexFDM1D_v2PfS_iiS_: # @_Z27__device_stub__FlexFDM1D_v2PfS_iiS_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 4(%rsp)
movl %ecx, (%rsp)
movq %r8, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 4(%rsp), %rax
movq %rax, 96(%rsp)
movq %rsp, %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12FlexFDM1D_v2PfS_iiS_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z27__device_stub__FlexFDM1D_v2PfS_iiS_, .Lfunc_end1-_Z27__device_stub__FlexFDM1D_v2PfS_iiS_
.cfi_endproc
# -- End function
.globl _Z27__device_stub__FlexFDM1D_v3PfS_iiS_ # -- Begin function _Z27__device_stub__FlexFDM1D_v3PfS_iiS_
.p2align 4, 0x90
.type _Z27__device_stub__FlexFDM1D_v3PfS_iiS_,@function
_Z27__device_stub__FlexFDM1D_v3PfS_iiS_: # @_Z27__device_stub__FlexFDM1D_v3PfS_iiS_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 4(%rsp)
movl %ecx, (%rsp)
movq %r8, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 4(%rsp), %rax
movq %rax, 96(%rsp)
movq %rsp, %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12FlexFDM1D_v3PfS_iiS_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size _Z27__device_stub__FlexFDM1D_v3PfS_iiS_, .Lfunc_end2-_Z27__device_stub__FlexFDM1D_v3PfS_iiS_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z15FlexFDM1D_naivePfS_iiS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12FlexFDM1D_v2PfS_iiS_, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12FlexFDM1D_v3PfS_iiS_, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z15FlexFDM1D_naivePfS_iiS_,@object # @_Z15FlexFDM1D_naivePfS_iiS_
.section .rodata,"a",@progbits
.globl _Z15FlexFDM1D_naivePfS_iiS_
.p2align 3, 0x0
_Z15FlexFDM1D_naivePfS_iiS_:
.quad _Z30__device_stub__FlexFDM1D_naivePfS_iiS_
.size _Z15FlexFDM1D_naivePfS_iiS_, 8
.type _Z12FlexFDM1D_v2PfS_iiS_,@object # @_Z12FlexFDM1D_v2PfS_iiS_
.globl _Z12FlexFDM1D_v2PfS_iiS_
.p2align 3, 0x0
_Z12FlexFDM1D_v2PfS_iiS_:
.quad _Z27__device_stub__FlexFDM1D_v2PfS_iiS_
.size _Z12FlexFDM1D_v2PfS_iiS_, 8
.type _Z12FlexFDM1D_v3PfS_iiS_,@object # @_Z12FlexFDM1D_v3PfS_iiS_
.globl _Z12FlexFDM1D_v3PfS_iiS_
.p2align 3, 0x0
_Z12FlexFDM1D_v3PfS_iiS_:
.quad _Z27__device_stub__FlexFDM1D_v3PfS_iiS_
.size _Z12FlexFDM1D_v3PfS_iiS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z15FlexFDM1D_naivePfS_iiS_"
.size .L__unnamed_1, 28
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z12FlexFDM1D_v2PfS_iiS_"
.size .L__unnamed_2, 25
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z12FlexFDM1D_v3PfS_iiS_"
.size .L__unnamed_3, 25
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z30__device_stub__FlexFDM1D_naivePfS_iiS_
.addrsig_sym _Z27__device_stub__FlexFDM1D_v2PfS_iiS_
.addrsig_sym _Z27__device_stub__FlexFDM1D_v3PfS_iiS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z15FlexFDM1D_naivePfS_iiS_
.addrsig_sym _Z12FlexFDM1D_v2PfS_iiS_
.addrsig_sym _Z12FlexFDM1D_v3PfS_iiS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z12FlexFDM1D_v3PfS_iiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z12FlexFDM1D_v2PfS_iiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z15FlexFDM1D_naivePfS_iiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z15FlexFDM1D_naivePfS_iiS_
.globl _Z15FlexFDM1D_naivePfS_iiS_
.p2align 8
.type _Z15FlexFDM1D_naivePfS_iiS_,@function
_Z15FlexFDM1D_naivePfS_iiS_:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z15FlexFDM1D_naivePfS_iiS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 32
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z15FlexFDM1D_naivePfS_iiS_, .Lfunc_end0-_Z15FlexFDM1D_naivePfS_iiS_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z12FlexFDM1D_v2PfS_iiS_
.globl _Z12FlexFDM1D_v2PfS_iiS_
.p2align 8
.type _Z12FlexFDM1D_v2PfS_iiS_,@function
_Z12FlexFDM1D_v2PfS_iiS_:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12FlexFDM1D_v2PfS_iiS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 32
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z12FlexFDM1D_v2PfS_iiS_, .Lfunc_end1-_Z12FlexFDM1D_v2PfS_iiS_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z12FlexFDM1D_v3PfS_iiS_
.globl _Z12FlexFDM1D_v3PfS_iiS_
.p2align 8
.type _Z12FlexFDM1D_v3PfS_iiS_,@function
_Z12FlexFDM1D_v3PfS_iiS_:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12FlexFDM1D_v3PfS_iiS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 32
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z12FlexFDM1D_v3PfS_iiS_, .Lfunc_end2-_Z12FlexFDM1D_v3PfS_iiS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 32
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z15FlexFDM1D_naivePfS_iiS_
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z15FlexFDM1D_naivePfS_iiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 32
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12FlexFDM1D_v2PfS_iiS_
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z12FlexFDM1D_v2PfS_iiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 32
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12FlexFDM1D_v3PfS_iiS_
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z12FlexFDM1D_v3PfS_iiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000805ed_00000000-6_FlexFDM1D_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z41__device_stub__Z15FlexFDM1D_naivePfS_iiS_PfS_iiS_
.type _Z41__device_stub__Z15FlexFDM1D_naivePfS_iiS_PfS_iiS_, @function
_Z41__device_stub__Z15FlexFDM1D_naivePfS_iiS_PfS_iiS_:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %r8, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z15FlexFDM1D_naivePfS_iiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z41__device_stub__Z15FlexFDM1D_naivePfS_iiS_PfS_iiS_, .-_Z41__device_stub__Z15FlexFDM1D_naivePfS_iiS_PfS_iiS_
.globl _Z15FlexFDM1D_naivePfS_iiS_
.type _Z15FlexFDM1D_naivePfS_iiS_, @function
_Z15FlexFDM1D_naivePfS_iiS_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z41__device_stub__Z15FlexFDM1D_naivePfS_iiS_PfS_iiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z15FlexFDM1D_naivePfS_iiS_, .-_Z15FlexFDM1D_naivePfS_iiS_
.globl _Z38__device_stub__Z12FlexFDM1D_v2PfS_iiS_PfS_iiS_
.type _Z38__device_stub__Z12FlexFDM1D_v2PfS_iiS_PfS_iiS_, @function
_Z38__device_stub__Z12FlexFDM1D_v2PfS_iiS_PfS_iiS_:
.LFB2053:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %r8, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z12FlexFDM1D_v2PfS_iiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2053:
.size _Z38__device_stub__Z12FlexFDM1D_v2PfS_iiS_PfS_iiS_, .-_Z38__device_stub__Z12FlexFDM1D_v2PfS_iiS_PfS_iiS_
.globl _Z12FlexFDM1D_v2PfS_iiS_
.type _Z12FlexFDM1D_v2PfS_iiS_, @function
_Z12FlexFDM1D_v2PfS_iiS_:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z12FlexFDM1D_v2PfS_iiS_PfS_iiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _Z12FlexFDM1D_v2PfS_iiS_, .-_Z12FlexFDM1D_v2PfS_iiS_
.globl _Z38__device_stub__Z12FlexFDM1D_v3PfS_iiS_PfS_iiS_
.type _Z38__device_stub__Z12FlexFDM1D_v3PfS_iiS_PfS_iiS_, @function
_Z38__device_stub__Z12FlexFDM1D_v3PfS_iiS_PfS_iiS_:
.LFB2055:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %r8, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z12FlexFDM1D_v3PfS_iiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2055:
.size _Z38__device_stub__Z12FlexFDM1D_v3PfS_iiS_PfS_iiS_, .-_Z38__device_stub__Z12FlexFDM1D_v3PfS_iiS_PfS_iiS_
.globl _Z12FlexFDM1D_v3PfS_iiS_
.type _Z12FlexFDM1D_v3PfS_iiS_, @function
_Z12FlexFDM1D_v3PfS_iiS_:
.LFB2056:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z12FlexFDM1D_v3PfS_iiS_PfS_iiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2056:
.size _Z12FlexFDM1D_v3PfS_iiS_, .-_Z12FlexFDM1D_v3PfS_iiS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z12FlexFDM1D_v3PfS_iiS_"
.LC1:
.string "_Z12FlexFDM1D_v2PfS_iiS_"
.LC2:
.string "_Z15FlexFDM1D_naivePfS_iiS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2058:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z12FlexFDM1D_v3PfS_iiS_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z12FlexFDM1D_v2PfS_iiS_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z15FlexFDM1D_naivePfS_iiS_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "FlexFDM1D_kernel.hip"
.globl _Z30__device_stub__FlexFDM1D_naivePfS_iiS_ # -- Begin function _Z30__device_stub__FlexFDM1D_naivePfS_iiS_
.p2align 4, 0x90
.type _Z30__device_stub__FlexFDM1D_naivePfS_iiS_,@function
_Z30__device_stub__FlexFDM1D_naivePfS_iiS_: # @_Z30__device_stub__FlexFDM1D_naivePfS_iiS_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 4(%rsp)
movl %ecx, (%rsp)
movq %r8, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 4(%rsp), %rax
movq %rax, 96(%rsp)
movq %rsp, %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z15FlexFDM1D_naivePfS_iiS_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z30__device_stub__FlexFDM1D_naivePfS_iiS_, .Lfunc_end0-_Z30__device_stub__FlexFDM1D_naivePfS_iiS_
.cfi_endproc
# -- End function
.globl _Z27__device_stub__FlexFDM1D_v2PfS_iiS_ # -- Begin function _Z27__device_stub__FlexFDM1D_v2PfS_iiS_
.p2align 4, 0x90
.type _Z27__device_stub__FlexFDM1D_v2PfS_iiS_,@function
_Z27__device_stub__FlexFDM1D_v2PfS_iiS_: # @_Z27__device_stub__FlexFDM1D_v2PfS_iiS_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 4(%rsp)
movl %ecx, (%rsp)
movq %r8, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 4(%rsp), %rax
movq %rax, 96(%rsp)
movq %rsp, %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12FlexFDM1D_v2PfS_iiS_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z27__device_stub__FlexFDM1D_v2PfS_iiS_, .Lfunc_end1-_Z27__device_stub__FlexFDM1D_v2PfS_iiS_
.cfi_endproc
# -- End function
.globl _Z27__device_stub__FlexFDM1D_v3PfS_iiS_ # -- Begin function _Z27__device_stub__FlexFDM1D_v3PfS_iiS_
.p2align 4, 0x90
.type _Z27__device_stub__FlexFDM1D_v3PfS_iiS_,@function
_Z27__device_stub__FlexFDM1D_v3PfS_iiS_: # @_Z27__device_stub__FlexFDM1D_v3PfS_iiS_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 4(%rsp)
movl %ecx, (%rsp)
movq %r8, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 4(%rsp), %rax
movq %rax, 96(%rsp)
movq %rsp, %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12FlexFDM1D_v3PfS_iiS_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size _Z27__device_stub__FlexFDM1D_v3PfS_iiS_, .Lfunc_end2-_Z27__device_stub__FlexFDM1D_v3PfS_iiS_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z15FlexFDM1D_naivePfS_iiS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12FlexFDM1D_v2PfS_iiS_, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12FlexFDM1D_v3PfS_iiS_, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z15FlexFDM1D_naivePfS_iiS_,@object # @_Z15FlexFDM1D_naivePfS_iiS_
.section .rodata,"a",@progbits
.globl _Z15FlexFDM1D_naivePfS_iiS_
.p2align 3, 0x0
_Z15FlexFDM1D_naivePfS_iiS_:
.quad _Z30__device_stub__FlexFDM1D_naivePfS_iiS_
.size _Z15FlexFDM1D_naivePfS_iiS_, 8
.type _Z12FlexFDM1D_v2PfS_iiS_,@object # @_Z12FlexFDM1D_v2PfS_iiS_
.globl _Z12FlexFDM1D_v2PfS_iiS_
.p2align 3, 0x0
_Z12FlexFDM1D_v2PfS_iiS_:
.quad _Z27__device_stub__FlexFDM1D_v2PfS_iiS_
.size _Z12FlexFDM1D_v2PfS_iiS_, 8
.type _Z12FlexFDM1D_v3PfS_iiS_,@object # @_Z12FlexFDM1D_v3PfS_iiS_
.globl _Z12FlexFDM1D_v3PfS_iiS_
.p2align 3, 0x0
_Z12FlexFDM1D_v3PfS_iiS_:
.quad _Z27__device_stub__FlexFDM1D_v3PfS_iiS_
.size _Z12FlexFDM1D_v3PfS_iiS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z15FlexFDM1D_naivePfS_iiS_"
.size .L__unnamed_1, 28
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z12FlexFDM1D_v2PfS_iiS_"
.size .L__unnamed_2, 25
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z12FlexFDM1D_v3PfS_iiS_"
.size .L__unnamed_3, 25
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z30__device_stub__FlexFDM1D_naivePfS_iiS_
.addrsig_sym _Z27__device_stub__FlexFDM1D_v2PfS_iiS_
.addrsig_sym _Z27__device_stub__FlexFDM1D_v3PfS_iiS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z15FlexFDM1D_naivePfS_iiS_
.addrsig_sym _Z12FlexFDM1D_v2PfS_iiS_
.addrsig_sym _Z12FlexFDM1D_v3PfS_iiS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
// Define your kernels in this file you may use more than one kernel if you
// need to
#define BLOCK_SIZE 512
// INSERT KERNEL(S) HERE
// Privatized histogram kernel.
//
// Launch shape: 1-D grid of 1-D blocks, with num_bins * sizeof(unsigned int)
// bytes of dynamic shared memory per block (third launch parameter).
// Each block accumulates counts into a private shared-memory histogram and
// then merges it into the global `bins` array, so global-memory atomic
// traffic is one add per (block, bin) instead of one per input element.
//
// NOTE(review): assumes every input[i] < num_bins — an out-of-range value
// makes the shared-memory atomicAdd below write out of bounds. Also assumes
// the caller pre-zeroes bins[], since this kernel only adds to it.
__global__ void fillBins(unsigned int* input, unsigned int* bins, unsigned int num_elements, unsigned int num_bins) {
    extern __shared__ unsigned int private_histogram[];  // one counter per bin
    //Initializing private histogram bins
    // Strided clear: thread t zeroes bins t, t + blockDim.x, ... so any
    // num_bins (smaller or larger than the block) is fully covered.
    int bin_stride = 0;
    while((threadIdx.x + bin_stride) < num_bins) {
        private_histogram[threadIdx.x + bin_stride] = 0;
        bin_stride += blockDim.x;
    }
    __syncthreads();  // all private bins are zero before counting begins
    //Computation of private histogram
    // Grid-stride loop over the input elements.
    // NOTE(review): i and stride are signed int — num_elements > INT_MAX
    // would overflow; confirm input sizes stay below that.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    while(i < num_elements) {
        atomicAdd(&private_histogram[input[i]], 1);  // block-local shared-memory atomic
        i += stride;
    }
    __syncthreads();  // the block's private histogram is complete
    //Merging private history bins with global history bins
    // Same strided pattern as the clear; one global atomic per bin per block.
    bin_stride = 0;
    while((threadIdx.x + bin_stride) < num_bins) {
        atomicAdd(&bins[threadIdx.x + bin_stride], private_histogram[threadIdx.x + bin_stride]);
        bin_stride += blockDim.x;
    }
    __syncthreads();  // NOTE(review): redundant at kernel exit, but harmless
}
/******************************************************************************
Setup and invoke your kernel(s) in this function. You may also allocate more
GPU memory if you need to
*******************************************************************************/
// Host-side launcher for fillBins: one thread per input element, rounded up
// to whole BLOCK_SIZE (512) blocks; dynamic shared memory sized to hold one
// unsigned int per bin.
//
// NOTE(review): the ceil-div underflows if num_elements == 0 (unsigned wrap
// yields a huge grid) — confirm callers never pass 0.
// NOTE(review): no error check after the launch, and the launch fails
// silently if num_bins * 4 bytes exceeds the device's dynamic shared-memory
// limit per block (commonly 48 KB) — worth verifying for large num_bins.
void histogram(unsigned int* input, unsigned int* bins, unsigned int num_elements,
        unsigned int num_bins) {
    // INSERT CODE HERE
    dim3 dimGrid((num_elements - 1)/BLOCK_SIZE + 1, 1, 1);  // ceil(num_elements / BLOCK_SIZE)
    dim3 dimBlock(BLOCK_SIZE, 1, 1);
    fillBins<<<dimGrid, dimBlock, num_bins * sizeof(unsigned int)>>>(input, bins, num_elements, num_bins);
} | code for sm_80
Function : _Z8fillBinsPjS_jj
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e220000002100 */
/*0020*/ BSSY B0, 0x110 ; /* 0x000000e000007945 */
/* 0x000fe60003800000 */
/*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e620000002500 */
/*0040*/ ISETP.GE.U32.AND P2, PT, R7.reuse, c[0x0][0x174], PT ; /* 0x00005d0007007a0c */
/* 0x041fe40003f46070 */
/*0050*/ ISETP.GE.U32.AND P0, PT, R7, c[0x0][0x174], PT ; /* 0x00005d0007007a0c */
/* 0x000fe20003f06070 */
/*0060*/ IMAD R3, R0, c[0x0][0x0], R7 ; /* 0x0000000000037a24 */
/* 0x002fca00078e0207 */
/*0070*/ ISETP.GE.U32.AND P1, PT, R3, c[0x0][0x170], PT ; /* 0x00005c0003007a0c */
/* 0x000fca0003f26070 */
/*0080*/ @P2 BRA 0x100 ; /* 0x0000007000002947 */
/* 0x000fea0003800000 */
/*0090*/ HFMA2.MMA R2, -RZ, RZ, 0, 0 ; /* 0x00000000ff027435 */
/* 0x000fe200000001ff */
/*00a0*/ IMAD.MOV.U32 R0, RZ, RZ, R7 ; /* 0x000000ffff007224 */
/* 0x000fd200078e0007 */
/*00b0*/ IADD3 R2, R2, c[0x0][0x0], RZ ; /* 0x0000000002027a10 */
/* 0x000fe20007ffe0ff */
/*00c0*/ STS [R0.X4], RZ ; /* 0x000000ff00007388 */
/* 0x0001e80000004800 */
/*00d0*/ IMAD.IADD R0, R2, 0x1, R7 ; /* 0x0000000102007824 */
/* 0x001fca00078e0207 */
/*00e0*/ ISETP.GE.U32.AND P2, PT, R0, c[0x0][0x174], PT ; /* 0x00005d0000007a0c */
/* 0x000fda0003f46070 */
/*00f0*/ @!P2 BRA 0xb0 ; /* 0xffffffb00000a947 */
/* 0x000fea000383ffff */
/*0100*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0110*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0120*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0130*/ @P1 BRA 0x1e0 ; /* 0x000000a000001947 */
/* 0x000fea0003800000 */
/*0140*/ MOV R0, R3 ; /* 0x0000000300007202 */
/* 0x000fe40000000f00 */
/*0150*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fc800078e00ff */
/*0160*/ IMAD.WIDE R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x001fcc00078e0203 */
/*0170*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*0180*/ MOV R5, c[0x0][0x0] ; /* 0x0000000000057a02 */
/* 0x000fe20000000f00 */
/*0190*/ YIELD ; /* 0x0000000000007946 */
/* 0x000fe80003800000 */
/*01a0*/ IMAD R0, R5, c[0x0][0xc], R0 ; /* 0x0000030005007a24 */
/* 0x000fca00078e0200 */
/*01b0*/ ISETP.GE.U32.AND P1, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fe20003f26070 */
/*01c0*/ ATOMS.POPC.INC.32 RZ, [R2.X4+URZ] ; /* 0x0000000002ff7f8c */
/* 0x0041d8000d00403f */
/*01d0*/ @!P1 BRA 0x150 ; /* 0xffffff7000009947 */
/* 0x000fea000383ffff */
/*01e0*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe20003800000 */
/*01f0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0200*/ @P0 BRA 0x2c0 ; /* 0x000000b000000947 */
/* 0x000fea0003800000 */
/*0210*/ IMAD.MOV.U32 R0, RZ, RZ, RZ ; /* 0x000000ffff007224 */
/* 0x000fe200078e00ff */
/*0220*/ MOV R4, R7 ; /* 0x0000000700047202 */
/* 0x000fe20000000f00 */
/*0230*/ IMAD.MOV.U32 R9, RZ, RZ, 0x4 ; /* 0x00000004ff097424 */
/* 0x000fc800078e00ff */
/*0240*/ LDS R5, [R4.X4] ; /* 0x0000000004057984 */
/* 0x0012a20000004800 */
/*0250*/ IADD3 R0, R0, c[0x0][0x0], RZ ; /* 0x0000000000007a10 */
/* 0x000fe20007ffe0ff */
/*0260*/ IMAD.WIDE.U32 R2, R4, R9, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x001fe200078e0009 */
/*0270*/ YIELD ; /* 0x0000000000007946 */
/* 0x000fe40003800000 */
/*0280*/ IADD3 R4, R0, R7, RZ ; /* 0x0000000700047210 */
/* 0x002fc80007ffe0ff */
/*0290*/ ISETP.GE.U32.AND P0, PT, R4, c[0x0][0x174], PT ; /* 0x00005d0004007a0c */
/* 0x000fe20003f06070 */
/*02a0*/ RED.E.ADD.STRONG.GPU [R2.64], R5 ; /* 0x000000050200798e */
/* 0x0041d8000c10e184 */
/*02b0*/ @!P0 BRA 0x240 ; /* 0xffffff8000008947 */
/* 0x000fea000383ffff */
/*02c0*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe20003800000 */
/*02d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*02e0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*02f0*/ BRA 0x2f0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0300*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0310*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0320*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0330*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0340*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0350*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
// Define your kernels in this file you may use more than one kernel if you
// need to
#define BLOCK_SIZE 512
// INSERT KERNEL(S) HERE
// Privatized histogram kernel.
//
// Launch shape: 1-D grid of 1-D blocks, with num_bins * sizeof(unsigned int)
// bytes of dynamic shared memory per block (third launch parameter).
// Each block accumulates counts into a private shared-memory histogram and
// then merges it into the global `bins` array, so global-memory atomic
// traffic is one add per (block, bin) instead of one per input element.
//
// NOTE(review): assumes every input[i] < num_bins — an out-of-range value
// makes the shared-memory atomicAdd below write out of bounds. Also assumes
// the caller pre-zeroes bins[], since this kernel only adds to it.
__global__ void fillBins(unsigned int* input, unsigned int* bins, unsigned int num_elements, unsigned int num_bins) {
    extern __shared__ unsigned int private_histogram[];  // one counter per bin
    //Initializing private histogram bins
    // Strided clear: thread t zeroes bins t, t + blockDim.x, ... so any
    // num_bins (smaller or larger than the block) is fully covered.
    int bin_stride = 0;
    while((threadIdx.x + bin_stride) < num_bins) {
        private_histogram[threadIdx.x + bin_stride] = 0;
        bin_stride += blockDim.x;
    }
    __syncthreads();  // all private bins are zero before counting begins
    //Computation of private histogram
    // Grid-stride loop over the input elements.
    // NOTE(review): i and stride are signed int — num_elements > INT_MAX
    // would overflow; confirm input sizes stay below that.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    while(i < num_elements) {
        atomicAdd(&private_histogram[input[i]], 1);  // block-local shared-memory atomic
        i += stride;
    }
    __syncthreads();  // the block's private histogram is complete
    //Merging private history bins with global history bins
    // Same strided pattern as the clear; one global atomic per bin per block.
    bin_stride = 0;
    while((threadIdx.x + bin_stride) < num_bins) {
        atomicAdd(&bins[threadIdx.x + bin_stride], private_histogram[threadIdx.x + bin_stride]);
        bin_stride += blockDim.x;
    }
    __syncthreads();  // NOTE(review): redundant at kernel exit, but harmless
}
/******************************************************************************
Setup and invoke your kernel(s) in this function. You may also allocate more
GPU memory if you need to
*******************************************************************************/
// Host-side launcher for fillBins: one thread per input element, rounded up
// to whole BLOCK_SIZE (512) blocks; dynamic shared memory sized to hold one
// unsigned int per bin.
//
// NOTE(review): the ceil-div underflows if num_elements == 0 (unsigned wrap
// yields a huge grid) — confirm callers never pass 0.
// NOTE(review): no error check after the launch, and the launch fails
// silently if num_bins * 4 bytes exceeds the device's dynamic shared-memory
// limit per block (commonly 48 KB) — worth verifying for large num_bins.
void histogram(unsigned int* input, unsigned int* bins, unsigned int num_elements,
        unsigned int num_bins) {
    // INSERT CODE HERE
    dim3 dimGrid((num_elements - 1)/BLOCK_SIZE + 1, 1, 1);  // ceil(num_elements / BLOCK_SIZE)
    dim3 dimBlock(BLOCK_SIZE, 1, 1);
    fillBins<<<dimGrid, dimBlock, num_bins * sizeof(unsigned int)>>>(input, bins, num_elements, num_bins);
} | .file "tmpxft_001858c9_00000000-6_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z31__device_stub__Z8fillBinsPjS_jjPjS_jj
.type _Z31__device_stub__Z8fillBinsPjS_jjPjS_jj, @function
_Z31__device_stub__Z8fillBinsPjS_jjPjS_jj:
.LFB2052:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z8fillBinsPjS_jj(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z31__device_stub__Z8fillBinsPjS_jjPjS_jj, .-_Z31__device_stub__Z8fillBinsPjS_jjPjS_jj
.globl _Z8fillBinsPjS_jj
.type _Z8fillBinsPjS_jj, @function
_Z8fillBinsPjS_jj:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z8fillBinsPjS_jjPjS_jj
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z8fillBinsPjS_jj, .-_Z8fillBinsPjS_jj
.globl _Z9histogramPjS_jj
.type _Z9histogramPjS_jj, @function
_Z9histogramPjS_jj:
.LFB2027:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $40, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %r12
movq %rsi, %r13
movl %edx, %ebx
movl %ecx, %ebp
leal -1(%rdx), %eax
shrl $9, %eax
addl $1, %eax
movl %eax, 8(%rsp)
movl $1, 12(%rsp)
movl $512, 20(%rsp)
movl $1, 24(%rsp)
movl %ecx, %eax
movl $0, %r9d
leaq 0(,%rax,4), %r8
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L11:
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
movl %ebp, %ecx
movl %ebx, %edx
movq %r13, %rsi
movq %r12, %rdi
call _Z31__device_stub__Z8fillBinsPjS_jjPjS_jj
jmp .L11
.cfi_endproc
.LFE2027:
.size _Z9histogramPjS_jj, .-_Z9histogramPjS_jj
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z8fillBinsPjS_jj"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z8fillBinsPjS_jj(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
// Define your kernels in this file you may use more than one kernel if you
// need to
#define BLOCK_SIZE 512
// INSERT KERNEL(S) HERE
// Privatized histogram kernel.
//
// Launch shape: 1-D grid of 1-D blocks, with num_bins * sizeof(unsigned int)
// bytes of dynamic shared memory per block (third launch parameter).
// Each block accumulates counts into a private shared-memory histogram and
// then merges it into the global `bins` array, so global-memory atomic
// traffic is one add per (block, bin) instead of one per input element.
//
// NOTE(review): assumes every input[i] < num_bins — an out-of-range value
// makes the shared-memory atomicAdd below write out of bounds. Also assumes
// the caller pre-zeroes bins[], since this kernel only adds to it.
__global__ void fillBins(unsigned int* input, unsigned int* bins, unsigned int num_elements, unsigned int num_bins) {
    extern __shared__ unsigned int private_histogram[];  // one counter per bin
    //Initializing private histogram bins
    // Strided clear: thread t zeroes bins t, t + blockDim.x, ... so any
    // num_bins (smaller or larger than the block) is fully covered.
    int bin_stride = 0;
    while((threadIdx.x + bin_stride) < num_bins) {
        private_histogram[threadIdx.x + bin_stride] = 0;
        bin_stride += blockDim.x;
    }
    __syncthreads();  // all private bins are zero before counting begins
    //Computation of private histogram
    // Grid-stride loop over the input elements.
    // NOTE(review): i and stride are signed int — num_elements > INT_MAX
    // would overflow; confirm input sizes stay below that.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    while(i < num_elements) {
        atomicAdd(&private_histogram[input[i]], 1);  // block-local shared-memory atomic
        i += stride;
    }
    __syncthreads();  // the block's private histogram is complete
    //Merging private history bins with global history bins
    // Same strided pattern as the clear; one global atomic per bin per block.
    bin_stride = 0;
    while((threadIdx.x + bin_stride) < num_bins) {
        atomicAdd(&bins[threadIdx.x + bin_stride], private_histogram[threadIdx.x + bin_stride]);
        bin_stride += blockDim.x;
    }
    __syncthreads();  // NOTE(review): redundant at kernel exit, but harmless
}
/******************************************************************************
Setup and invoke your kernel(s) in this function. You may also allocate more
GPU memory if you need to
*******************************************************************************/
// Host-side launcher for fillBins: one thread per input element, rounded up
// to whole BLOCK_SIZE (512) blocks; dynamic shared memory sized to hold one
// unsigned int per bin.
//
// NOTE(review): the ceil-div underflows if num_elements == 0 (unsigned wrap
// yields a huge grid) — confirm callers never pass 0.
// NOTE(review): no error check after the launch, and the launch fails
// silently if num_bins * 4 bytes exceeds the device's dynamic shared-memory
// limit per block (commonly 48 KB) — worth verifying for large num_bins.
void histogram(unsigned int* input, unsigned int* bins, unsigned int num_elements,
        unsigned int num_bins) {
    // INSERT CODE HERE
    dim3 dimGrid((num_elements - 1)/BLOCK_SIZE + 1, 1, 1);  // ceil(num_elements / BLOCK_SIZE)
    dim3 dimBlock(BLOCK_SIZE, 1, 1);
    fillBins<<<dimGrid, dimBlock, num_bins * sizeof(unsigned int)>>>(input, bins, num_elements, num_bins);
} | #include <hip/hip_runtime.h>
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
// Define your kernels in this file you may use more than one kernel if you
// need to
#define BLOCK_SIZE 512
// INSERT KERNEL(S) HERE
// Privatized histogram kernel (HIP port; semantics identical to the CUDA
// original).
//
// Launch shape: 1-D grid of 1-D blocks, with num_bins * sizeof(unsigned int)
// bytes of dynamic shared memory per block (third launch parameter).
// Each block accumulates counts into a private shared-memory histogram and
// then merges it into the global `bins` array, so global-memory atomic
// traffic is one add per (block, bin) instead of one per input element.
//
// NOTE(review): assumes every input[i] < num_bins — an out-of-range value
// makes the shared-memory atomicAdd below write out of bounds. Also assumes
// the caller pre-zeroes bins[], since this kernel only adds to it.
__global__ void fillBins(unsigned int* input, unsigned int* bins, unsigned int num_elements, unsigned int num_bins) {
    extern __shared__ unsigned int private_histogram[];  // one counter per bin
    //Initializing private histogram bins
    // Strided clear: thread t zeroes bins t, t + blockDim.x, ... so any
    // num_bins (smaller or larger than the block) is fully covered.
    int bin_stride = 0;
    while((threadIdx.x + bin_stride) < num_bins) {
        private_histogram[threadIdx.x + bin_stride] = 0;
        bin_stride += blockDim.x;
    }
    __syncthreads();  // all private bins are zero before counting begins
    //Computation of private histogram
    // Grid-stride loop over the input elements.
    // NOTE(review): i and stride are signed int — num_elements > INT_MAX
    // would overflow; confirm input sizes stay below that.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    while(i < num_elements) {
        atomicAdd(&private_histogram[input[i]], 1);  // block-local shared-memory atomic
        i += stride;
    }
    __syncthreads();  // the block's private histogram is complete
    //Merging private history bins with global history bins
    // Same strided pattern as the clear; one global atomic per bin per block.
    bin_stride = 0;
    while((threadIdx.x + bin_stride) < num_bins) {
        atomicAdd(&bins[threadIdx.x + bin_stride], private_histogram[threadIdx.x + bin_stride]);
        bin_stride += blockDim.x;
    }
    __syncthreads();  // NOTE(review): redundant at kernel exit, but harmless
}
/******************************************************************************
Setup and invoke your kernel(s) in this function. You may also allocate more
GPU memory if you need to
*******************************************************************************/
// Host-side launcher for fillBins: one thread per input element, rounded up
// to whole BLOCK_SIZE (512) blocks; dynamic shared memory sized to hold one
// unsigned int per bin.
//
// NOTE(review): the ceil-div underflows if num_elements == 0 (unsigned wrap
// yields a huge grid) — confirm callers never pass 0.
// NOTE(review): no error check after the launch, and the launch fails
// silently if num_bins * 4 bytes exceeds the device's dynamic shared-memory
// (LDS) limit per block — worth verifying for large num_bins.
void histogram(unsigned int* input, unsigned int* bins, unsigned int num_elements,
        unsigned int num_bins) {
    // INSERT CODE HERE
    dim3 dimGrid((num_elements - 1)/BLOCK_SIZE + 1, 1, 1);  // ceil(num_elements / BLOCK_SIZE)
    dim3 dimBlock(BLOCK_SIZE, 1, 1);
    fillBins<<<dimGrid, dimBlock, num_bins * sizeof(unsigned int)>>>(input, bins, num_elements, num_bins);
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
// Define your kernels in this file you may use more than one kernel if you
// need to
#define BLOCK_SIZE 512
// INSERT KERNEL(S) HERE
// Privatized histogram kernel (HIP port; semantics identical to the CUDA
// original).
//
// Launch shape: 1-D grid of 1-D blocks, with num_bins * sizeof(unsigned int)
// bytes of dynamic shared memory per block (third launch parameter).
// Each block accumulates counts into a private shared-memory histogram and
// then merges it into the global `bins` array, so global-memory atomic
// traffic is one add per (block, bin) instead of one per input element.
//
// NOTE(review): assumes every input[i] < num_bins — an out-of-range value
// makes the shared-memory atomicAdd below write out of bounds. Also assumes
// the caller pre-zeroes bins[], since this kernel only adds to it.
__global__ void fillBins(unsigned int* input, unsigned int* bins, unsigned int num_elements, unsigned int num_bins) {
    extern __shared__ unsigned int private_histogram[];  // one counter per bin
    //Initializing private histogram bins
    // Strided clear: thread t zeroes bins t, t + blockDim.x, ... so any
    // num_bins (smaller or larger than the block) is fully covered.
    int bin_stride = 0;
    while((threadIdx.x + bin_stride) < num_bins) {
        private_histogram[threadIdx.x + bin_stride] = 0;
        bin_stride += blockDim.x;
    }
    __syncthreads();  // all private bins are zero before counting begins
    //Computation of private histogram
    // Grid-stride loop over the input elements.
    // NOTE(review): i and stride are signed int — num_elements > INT_MAX
    // would overflow; confirm input sizes stay below that.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    while(i < num_elements) {
        atomicAdd(&private_histogram[input[i]], 1);  // block-local shared-memory atomic
        i += stride;
    }
    __syncthreads();  // the block's private histogram is complete
    //Merging private history bins with global history bins
    // Same strided pattern as the clear; one global atomic per bin per block.
    bin_stride = 0;
    while((threadIdx.x + bin_stride) < num_bins) {
        atomicAdd(&bins[threadIdx.x + bin_stride], private_histogram[threadIdx.x + bin_stride]);
        bin_stride += blockDim.x;
    }
    __syncthreads();  // NOTE(review): redundant at kernel exit, but harmless
}
/******************************************************************************
Setup and invoke your kernel(s) in this function. You may also allocate more
GPU memory if you need to
*******************************************************************************/
// Host-side launcher for fillBins: one thread per input element, rounded up
// to whole BLOCK_SIZE (512) blocks; dynamic shared memory sized to hold one
// unsigned int per bin.
//
// NOTE(review): the ceil-div underflows if num_elements == 0 (unsigned wrap
// yields a huge grid) — confirm callers never pass 0.
// NOTE(review): no error check after the launch, and the launch fails
// silently if num_bins * 4 bytes exceeds the device's dynamic shared-memory
// (LDS) limit per block — worth verifying for large num_bins.
void histogram(unsigned int* input, unsigned int* bins, unsigned int num_elements,
        unsigned int num_bins) {
    // INSERT CODE HERE
    dim3 dimGrid((num_elements - 1)/BLOCK_SIZE + 1, 1, 1);  // ceil(num_elements / BLOCK_SIZE)
    dim3 dimBlock(BLOCK_SIZE, 1, 1);
    fillBins<<<dimGrid, dimBlock, num_bins * sizeof(unsigned int)>>>(input, bins, num_elements, num_bins);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z8fillBinsPjS_jj
.globl _Z8fillBinsPjS_jj
.p2align 8
.type _Z8fillBinsPjS_jj,@function
_Z8fillBinsPjS_jj:
s_load_b32 s8, s[0:1], 0x14
s_mov_b32 s2, exec_lo
s_waitcnt lgkmcnt(0)
v_cmpx_gt_u32_e64 s8, v0
s_cbranch_execz .LBB0_3
s_load_b32 s3, s[0:1], 0x24
v_lshl_add_u32 v1, v0, 2, 0
v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v3, v0
s_mov_b32 s4, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b32 s5, s3, 2
.LBB0_2:
v_add_nc_u32_e32 v3, s3, v3
ds_store_b32 v1, v2
v_add_nc_u32_e32 v1, s5, v1
v_cmp_le_u32_e32 vcc_lo, s8, v3
s_or_b32 s4, vcc_lo, s4
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s4
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_or_b32 exec_lo, exec_lo, s2
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_clause 0x1
s_load_b32 s4, s[0:1], 0x24
s_load_b32 s10, s[0:1], 0x10
s_add_u32 s2, s0, 24
s_addc_u32 s3, s1, 0
s_mov_b32 s11, exec_lo
s_waitcnt lgkmcnt(0)
s_and_b32 s9, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s9, v[0:1]
v_cmpx_gt_u32_e64 s10, v1
s_cbranch_execz .LBB0_6
s_load_b32 s4, s[2:3], 0x0
s_load_b64 s[2:3], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
v_mov_b32_e32 v4, 1
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_mul_i32 s4, s4, s9
v_add_co_u32 v2, vcc_lo, s2, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
s_ashr_i32 s5, s4, 31
s_mov_b32 s3, 0
s_lshl_b64 s[6:7], s[4:5], 2
.LBB0_5:
global_load_b32 v5, v[2:3], off
v_add_nc_u32_e32 v1, s4, v1
v_add_co_u32 v2, s2, v2, s6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_ci_u32_e64 v3, s2, s7, v3, s2
v_cmp_le_u32_e32 vcc_lo, s10, v1
s_or_b32 s3, vcc_lo, s3
s_waitcnt vmcnt(0)
v_lshl_add_u32 v5, v5, 2, 0
ds_add_u32 v5, v4
s_and_not1_b32 exec_lo, exec_lo, s3
s_cbranch_execnz .LBB0_5
.LBB0_6:
s_or_b32 exec_lo, exec_lo, s11
s_delay_alu instid0(SALU_CYCLE_1)
s_mov_b32 s2, exec_lo
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_cmpx_gt_u32_e64 s8, v0
s_cbranch_execz .LBB0_9
s_load_b64 s[0:1], s[0:1], 0x8
v_lshl_add_u32 v2, v0, 2, 0
v_mov_b32_e32 v1, 0
s_mov_b32 s3, 0
s_lshl_b32 s4, s9, 2
.LBB0_8:
ds_load_b32 v5, v2
v_lshlrev_b64 v[3:4], 2, v[0:1]
v_add_nc_u32_e32 v0, s9, v0
v_add_nc_u32_e32 v2, s4, v2
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v3, vcc_lo, s0, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s1, v4, vcc_lo
v_cmp_le_u32_e32 vcc_lo, s8, v0
s_or_b32 s3, vcc_lo, s3
global_atomic_add_u32 v[3:4], v5, off
s_and_not1_b32 exec_lo, exec_lo, s3
s_cbranch_execnz .LBB0_8
.LBB0_9:
s_or_b32 exec_lo, exec_lo, s2
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8fillBinsPjS_jj
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z8fillBinsPjS_jj, .Lfunc_end0-_Z8fillBinsPjS_jj
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym private_histogram
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
- .offset: 144
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8fillBinsPjS_jj
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z8fillBinsPjS_jj.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
// Define your kernels in this file you may use more than one kernel if you
// need to
#define BLOCK_SIZE 512
// INSERT KERNEL(S) HERE
__global__ void fillBins(unsigned int* input, unsigned int* bins, unsigned int num_elements, unsigned int num_bins) {
extern __shared__ unsigned int private_histogram[];
//Initializing private histogram bins
int bin_stride = 0;
while((threadIdx.x + bin_stride) < num_bins) {
private_histogram[threadIdx.x + bin_stride] = 0;
bin_stride += blockDim.x;
}
__syncthreads();
//Computation of private histogram
int i = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
while(i < num_elements) {
atomicAdd(&private_histogram[input[i]], 1);
i += stride;
}
__syncthreads();
//Merging private history bins with global history bins
bin_stride = 0;
while((threadIdx.x + bin_stride) < num_bins) {
atomicAdd(&bins[threadIdx.x + bin_stride], private_histogram[threadIdx.x + bin_stride]);
bin_stride += blockDim.x;
}
__syncthreads();
}
/******************************************************************************
Setup and invoke your kernel(s) in this function. You may also allocate more
GPU memory if you need to
*******************************************************************************/
void histogram(unsigned int* input, unsigned int* bins, unsigned int num_elements,
unsigned int num_bins) {
// INSERT CODE HERE
dim3 dimGrid((num_elements - 1)/BLOCK_SIZE + 1, 1, 1);
dim3 dimBlock(BLOCK_SIZE, 1, 1);
fillBins<<<dimGrid, dimBlock, num_bins * sizeof(unsigned int)>>>(input, bins, num_elements, num_bins);
} | .text
.file "kernel.hip"
.globl _Z23__device_stub__fillBinsPjS_jj # -- Begin function _Z23__device_stub__fillBinsPjS_jj
.p2align 4, 0x90
.type _Z23__device_stub__fillBinsPjS_jj,@function
_Z23__device_stub__fillBinsPjS_jj: # @_Z23__device_stub__fillBinsPjS_jj
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8fillBinsPjS_jj, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z23__device_stub__fillBinsPjS_jj, .Lfunc_end0-_Z23__device_stub__fillBinsPjS_jj
.cfi_endproc
# -- End function
.globl _Z9histogramPjS_jj # -- Begin function _Z9histogramPjS_jj
.p2align 4, 0x90
.type _Z9histogramPjS_jj,@function
_Z9histogramPjS_jj: # @_Z9histogramPjS_jj
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $120, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl %ecx, %ebx
movl %edx, %r14d
movq %rsi, %r15
movq %rdi, %r12
leal -1(%r14), %eax
shrl $9, %eax
movabsq $4294967296, %rdx # imm = 0x100000000
leaq (%rdx,%rax), %rdi
incq %rdi
movl %ecx, %r8d
shlq $2, %r8
orq $512, %rdx # imm = 0x200
movl $1, %esi
movl $1, %ecx
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq %r12, 72(%rsp)
movq %r15, 64(%rsp)
movl %r14d, 12(%rsp)
movl %ebx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8fillBinsPjS_jj, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
addq $120, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z9histogramPjS_jj, .Lfunc_end1-_Z9histogramPjS_jj
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8fillBinsPjS_jj, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z8fillBinsPjS_jj,@object # @_Z8fillBinsPjS_jj
.section .rodata,"a",@progbits
.globl _Z8fillBinsPjS_jj
.p2align 3, 0x0
_Z8fillBinsPjS_jj:
.quad _Z23__device_stub__fillBinsPjS_jj
.size _Z8fillBinsPjS_jj, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z8fillBinsPjS_jj"
.size .L__unnamed_1, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__fillBinsPjS_jj
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8fillBinsPjS_jj
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z8fillBinsPjS_jj
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e220000002100 */
/*0020*/ BSSY B0, 0x110 ; /* 0x000000e000007945 */
/* 0x000fe60003800000 */
/*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e620000002500 */
/*0040*/ ISETP.GE.U32.AND P2, PT, R7.reuse, c[0x0][0x174], PT ; /* 0x00005d0007007a0c */
/* 0x041fe40003f46070 */
/*0050*/ ISETP.GE.U32.AND P0, PT, R7, c[0x0][0x174], PT ; /* 0x00005d0007007a0c */
/* 0x000fe20003f06070 */
/*0060*/ IMAD R3, R0, c[0x0][0x0], R7 ; /* 0x0000000000037a24 */
/* 0x002fca00078e0207 */
/*0070*/ ISETP.GE.U32.AND P1, PT, R3, c[0x0][0x170], PT ; /* 0x00005c0003007a0c */
/* 0x000fca0003f26070 */
/*0080*/ @P2 BRA 0x100 ; /* 0x0000007000002947 */
/* 0x000fea0003800000 */
/*0090*/ HFMA2.MMA R2, -RZ, RZ, 0, 0 ; /* 0x00000000ff027435 */
/* 0x000fe200000001ff */
/*00a0*/ IMAD.MOV.U32 R0, RZ, RZ, R7 ; /* 0x000000ffff007224 */
/* 0x000fd200078e0007 */
/*00b0*/ IADD3 R2, R2, c[0x0][0x0], RZ ; /* 0x0000000002027a10 */
/* 0x000fe20007ffe0ff */
/*00c0*/ STS [R0.X4], RZ ; /* 0x000000ff00007388 */
/* 0x0001e80000004800 */
/*00d0*/ IMAD.IADD R0, R2, 0x1, R7 ; /* 0x0000000102007824 */
/* 0x001fca00078e0207 */
/*00e0*/ ISETP.GE.U32.AND P2, PT, R0, c[0x0][0x174], PT ; /* 0x00005d0000007a0c */
/* 0x000fda0003f46070 */
/*00f0*/ @!P2 BRA 0xb0 ; /* 0xffffffb00000a947 */
/* 0x000fea000383ffff */
/*0100*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0110*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0120*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0130*/ @P1 BRA 0x1e0 ; /* 0x000000a000001947 */
/* 0x000fea0003800000 */
/*0140*/ MOV R0, R3 ; /* 0x0000000300007202 */
/* 0x000fe40000000f00 */
/*0150*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fc800078e00ff */
/*0160*/ IMAD.WIDE R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x001fcc00078e0203 */
/*0170*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*0180*/ MOV R5, c[0x0][0x0] ; /* 0x0000000000057a02 */
/* 0x000fe20000000f00 */
/*0190*/ YIELD ; /* 0x0000000000007946 */
/* 0x000fe80003800000 */
/*01a0*/ IMAD R0, R5, c[0x0][0xc], R0 ; /* 0x0000030005007a24 */
/* 0x000fca00078e0200 */
/*01b0*/ ISETP.GE.U32.AND P1, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fe20003f26070 */
/*01c0*/ ATOMS.POPC.INC.32 RZ, [R2.X4+URZ] ; /* 0x0000000002ff7f8c */
/* 0x0041d8000d00403f */
/*01d0*/ @!P1 BRA 0x150 ; /* 0xffffff7000009947 */
/* 0x000fea000383ffff */
/*01e0*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe20003800000 */
/*01f0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0200*/ @P0 BRA 0x2c0 ; /* 0x000000b000000947 */
/* 0x000fea0003800000 */
/*0210*/ IMAD.MOV.U32 R0, RZ, RZ, RZ ; /* 0x000000ffff007224 */
/* 0x000fe200078e00ff */
/*0220*/ MOV R4, R7 ; /* 0x0000000700047202 */
/* 0x000fe20000000f00 */
/*0230*/ IMAD.MOV.U32 R9, RZ, RZ, 0x4 ; /* 0x00000004ff097424 */
/* 0x000fc800078e00ff */
/*0240*/ LDS R5, [R4.X4] ; /* 0x0000000004057984 */
/* 0x0012a20000004800 */
/*0250*/ IADD3 R0, R0, c[0x0][0x0], RZ ; /* 0x0000000000007a10 */
/* 0x000fe20007ffe0ff */
/*0260*/ IMAD.WIDE.U32 R2, R4, R9, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x001fe200078e0009 */
/*0270*/ YIELD ; /* 0x0000000000007946 */
/* 0x000fe40003800000 */
/*0280*/ IADD3 R4, R0, R7, RZ ; /* 0x0000000700047210 */
/* 0x002fc80007ffe0ff */
/*0290*/ ISETP.GE.U32.AND P0, PT, R4, c[0x0][0x174], PT ; /* 0x00005d0004007a0c */
/* 0x000fe20003f06070 */
/*02a0*/ RED.E.ADD.STRONG.GPU [R2.64], R5 ; /* 0x000000050200798e */
/* 0x0041d8000c10e184 */
/*02b0*/ @!P0 BRA 0x240 ; /* 0xffffff8000008947 */
/* 0x000fea000383ffff */
/*02c0*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe20003800000 */
/*02d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*02e0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*02f0*/ BRA 0x2f0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0300*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0310*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0320*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0330*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0340*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0350*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z8fillBinsPjS_jj
.globl _Z8fillBinsPjS_jj
.p2align 8
.type _Z8fillBinsPjS_jj,@function
_Z8fillBinsPjS_jj:
s_load_b32 s8, s[0:1], 0x14
s_mov_b32 s2, exec_lo
s_waitcnt lgkmcnt(0)
v_cmpx_gt_u32_e64 s8, v0
s_cbranch_execz .LBB0_3
s_load_b32 s3, s[0:1], 0x24
v_lshl_add_u32 v1, v0, 2, 0
v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v3, v0
s_mov_b32 s4, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b32 s5, s3, 2
.LBB0_2:
v_add_nc_u32_e32 v3, s3, v3
ds_store_b32 v1, v2
v_add_nc_u32_e32 v1, s5, v1
v_cmp_le_u32_e32 vcc_lo, s8, v3
s_or_b32 s4, vcc_lo, s4
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s4
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_or_b32 exec_lo, exec_lo, s2
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_clause 0x1
s_load_b32 s4, s[0:1], 0x24
s_load_b32 s10, s[0:1], 0x10
s_add_u32 s2, s0, 24
s_addc_u32 s3, s1, 0
s_mov_b32 s11, exec_lo
s_waitcnt lgkmcnt(0)
s_and_b32 s9, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s9, v[0:1]
v_cmpx_gt_u32_e64 s10, v1
s_cbranch_execz .LBB0_6
s_load_b32 s4, s[2:3], 0x0
s_load_b64 s[2:3], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
v_mov_b32_e32 v4, 1
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_mul_i32 s4, s4, s9
v_add_co_u32 v2, vcc_lo, s2, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
s_ashr_i32 s5, s4, 31
s_mov_b32 s3, 0
s_lshl_b64 s[6:7], s[4:5], 2
.LBB0_5:
global_load_b32 v5, v[2:3], off
v_add_nc_u32_e32 v1, s4, v1
v_add_co_u32 v2, s2, v2, s6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_ci_u32_e64 v3, s2, s7, v3, s2
v_cmp_le_u32_e32 vcc_lo, s10, v1
s_or_b32 s3, vcc_lo, s3
s_waitcnt vmcnt(0)
v_lshl_add_u32 v5, v5, 2, 0
ds_add_u32 v5, v4
s_and_not1_b32 exec_lo, exec_lo, s3
s_cbranch_execnz .LBB0_5
.LBB0_6:
s_or_b32 exec_lo, exec_lo, s11
s_delay_alu instid0(SALU_CYCLE_1)
s_mov_b32 s2, exec_lo
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_cmpx_gt_u32_e64 s8, v0
s_cbranch_execz .LBB0_9
s_load_b64 s[0:1], s[0:1], 0x8
v_lshl_add_u32 v2, v0, 2, 0
v_mov_b32_e32 v1, 0
s_mov_b32 s3, 0
s_lshl_b32 s4, s9, 2
.LBB0_8:
ds_load_b32 v5, v2
v_lshlrev_b64 v[3:4], 2, v[0:1]
v_add_nc_u32_e32 v0, s9, v0
v_add_nc_u32_e32 v2, s4, v2
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v3, vcc_lo, s0, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s1, v4, vcc_lo
v_cmp_le_u32_e32 vcc_lo, s8, v0
s_or_b32 s3, vcc_lo, s3
global_atomic_add_u32 v[3:4], v5, off
s_and_not1_b32 exec_lo, exec_lo, s3
s_cbranch_execnz .LBB0_8
.LBB0_9:
s_or_b32 exec_lo, exec_lo, s2
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8fillBinsPjS_jj
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z8fillBinsPjS_jj, .Lfunc_end0-_Z8fillBinsPjS_jj
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym private_histogram
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
- .offset: 144
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8fillBinsPjS_jj
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z8fillBinsPjS_jj.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001858c9_00000000-6_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z31__device_stub__Z8fillBinsPjS_jjPjS_jj
.type _Z31__device_stub__Z8fillBinsPjS_jjPjS_jj, @function
_Z31__device_stub__Z8fillBinsPjS_jjPjS_jj:
.LFB2052:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z8fillBinsPjS_jj(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z31__device_stub__Z8fillBinsPjS_jjPjS_jj, .-_Z31__device_stub__Z8fillBinsPjS_jjPjS_jj
.globl _Z8fillBinsPjS_jj
.type _Z8fillBinsPjS_jj, @function
_Z8fillBinsPjS_jj:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z8fillBinsPjS_jjPjS_jj
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z8fillBinsPjS_jj, .-_Z8fillBinsPjS_jj
.globl _Z9histogramPjS_jj
.type _Z9histogramPjS_jj, @function
_Z9histogramPjS_jj:
.LFB2027:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $40, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %r12
movq %rsi, %r13
movl %edx, %ebx
movl %ecx, %ebp
leal -1(%rdx), %eax
shrl $9, %eax
addl $1, %eax
movl %eax, 8(%rsp)
movl $1, 12(%rsp)
movl $512, 20(%rsp)
movl $1, 24(%rsp)
movl %ecx, %eax
movl $0, %r9d
leaq 0(,%rax,4), %r8
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L11:
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
movl %ebp, %ecx
movl %ebx, %edx
movq %r13, %rsi
movq %r12, %rdi
call _Z31__device_stub__Z8fillBinsPjS_jjPjS_jj
jmp .L11
.cfi_endproc
.LFE2027:
.size _Z9histogramPjS_jj, .-_Z9histogramPjS_jj
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z8fillBinsPjS_jj"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z8fillBinsPjS_jj(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "kernel.hip"
.globl _Z23__device_stub__fillBinsPjS_jj # -- Begin function _Z23__device_stub__fillBinsPjS_jj
.p2align 4, 0x90
.type _Z23__device_stub__fillBinsPjS_jj,@function
_Z23__device_stub__fillBinsPjS_jj: # @_Z23__device_stub__fillBinsPjS_jj
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8fillBinsPjS_jj, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z23__device_stub__fillBinsPjS_jj, .Lfunc_end0-_Z23__device_stub__fillBinsPjS_jj
.cfi_endproc
# -- End function
.globl _Z9histogramPjS_jj # -- Begin function _Z9histogramPjS_jj
.p2align 4, 0x90
.type _Z9histogramPjS_jj,@function
_Z9histogramPjS_jj: # @_Z9histogramPjS_jj
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $120, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl %ecx, %ebx
movl %edx, %r14d
movq %rsi, %r15
movq %rdi, %r12
leal -1(%r14), %eax
shrl $9, %eax
movabsq $4294967296, %rdx # imm = 0x100000000
leaq (%rdx,%rax), %rdi
incq %rdi
movl %ecx, %r8d
shlq $2, %r8
orq $512, %rdx # imm = 0x200
movl $1, %esi
movl $1, %ecx
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq %r12, 72(%rsp)
movq %r15, 64(%rsp)
movl %r14d, 12(%rsp)
movl %ebx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8fillBinsPjS_jj, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
addq $120, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z9histogramPjS_jj, .Lfunc_end1-_Z9histogramPjS_jj
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8fillBinsPjS_jj, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z8fillBinsPjS_jj,@object # @_Z8fillBinsPjS_jj
.section .rodata,"a",@progbits
.globl _Z8fillBinsPjS_jj
.p2align 3, 0x0
_Z8fillBinsPjS_jj:
.quad _Z23__device_stub__fillBinsPjS_jj
.size _Z8fillBinsPjS_jj, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z8fillBinsPjS_jj"
.size .L__unnamed_1, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__fillBinsPjS_jj
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8fillBinsPjS_jj
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <getopt.h>
#include <assert.h>
__global__ void jacobiKernel(float* A, float* b, int N, float* x_now, float* x_next)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N)
{
float sum = 0.0;
int idx_Ai = N * idx;
for (int j = 0; j < N; ++j)
if (idx != j)
sum += A[idx_Ai + j] * x_now[j];
x_next[idx] = (b[idx] - sum) / A[idx_Ai + idx];
}
}
void parse_argv(int argc, char *argv[], char **fname, int *iter, int *blockSize)
{
static struct option long_options[] =
{
{"file", required_argument, NULL, 'f'},
{"iterations", optional_argument, NULL, 'i'},
{"blockSize", optional_argument, NULL, 'b'},
{NULL, 0, NULL, 0}
};
int ch = 0;
while ((ch = getopt_long(argc, argv, "f:i:b:", long_options, NULL)) != -1) {
switch (ch) {
case 'f' : *fname = optarg;
break;
case 'i' : *iter = atoi(optarg);
break;
case 'b' : *blockSize = atoi(optarg);
break;
default:
abort();
}
}
}
int main(int argc, char *argv[])
{
int N, i, iter = 10000, blockSize = 512;
char *fname = NULL;
parse_argv(argc, argv, &fname, &iter, &blockSize);
FILE *file = fopen(fname, "r");
if (file == NULL)
exit(EXIT_FAILURE);
fscanf(file, "%d", &N);
printf("N = %d, iter = %d, blocksize = %d\n", N, iter, blockSize);
float *A = (float *)calloc(N * N, sizeof(float));
float *b = (float *)calloc(N, sizeof(float));
float *x = (float *)calloc(N, sizeof(float));
assert(A != NULL);
assert(b != NULL);
assert(x != NULL);
for (int i = 0; i < N; ++i) {
for (int j = 0; j < N; ++j) {
fscanf(file, "%f", &A[N * i + j]);
}
fscanf(file, "%f", &b[i]);
}
float *x0_d, *x1_d, *A_d, *b_d;
assert(cudaSuccess == cudaMalloc((void **) &A_d, N * N * sizeof(float)));
assert(cudaSuccess == cudaMalloc((void **) &b_d, N * sizeof(float)));
assert(cudaSuccess == cudaMalloc((void **) &x0_d, N * sizeof(float)));
assert(cudaSuccess == cudaMalloc((void **) &x1_d, N * sizeof(float)));
cudaMemcpy(A_d, A, sizeof(float) * N * N, cudaMemcpyHostToDevice);
cudaMemcpy(b_d, b, sizeof(float) * N, cudaMemcpyHostToDevice);
cudaMemcpy(x0_d, x, sizeof(float) * N, cudaMemcpyHostToDevice);
int nBlocks = (N + blockSize - 1) / blockSize;
printf("Running Jacobi method...\n");
for (i = 0; i < iter; ++i)
{
float *xnext = (i % 2 ? x0_d : x1_d);
float *xnow = (i % 2 ? x1_d : x0_d);
jacobiKernel <<< nBlocks, blockSize >>> (A_d, b_d, N, xnow, xnext);
}
cudaMemcpy(x, (iter % 2 ? x1_d : x0_d), sizeof(float) * N, cudaMemcpyDeviceToHost);
cudaFree(A_d); cudaFree(b_d); cudaFree(x0_d); cudaFree(x1_d);
free(A); free(b);
printf("\nResult after %d iterations:\n", iter);
for (i = 0; i < N; i++)
printf("x[%d] = %f\n", i, x[i]);
return 0;
} | .file "tmpxft_000877ce_00000000-6_jacobi.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "f:i:b:"
.text
.globl _Z10parse_argviPPcS0_PiS1_
.type _Z10parse_argviPPcS0_PiS1_, @function
_Z10parse_argviPPcS0_PiS1_:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movl %edi, %ebp
movq %rsi, %rbx
movq %rdx, %r15
movq %rcx, %r14
movq %r8, 8(%rsp)
leaq _ZZ10parse_argviPPcS0_PiS1_E12long_options(%rip), %r13
leaq .LC0(%rip), %r12
jmp .L4
.L15:
cmpl $98, %eax
jne .L13
movl $10, %edx
movl $0, %esi
movq optarg(%rip), %rdi
call __isoc23_strtol@PLT
movq 8(%rsp), %rcx
movl %eax, (%rcx)
jmp .L4
.L5:
movq optarg(%rip), %rax
movq %rax, (%r15)
.L4:
movl $0, %r8d
movq %r13, %rcx
movq %r12, %rdx
movq %rbx, %rsi
movl %ebp, %edi
call getopt_long@PLT
cmpl $-1, %eax
je .L14
cmpl $102, %eax
je .L5
cmpl $105, %eax
jne .L15
movl $10, %edx
movl $0, %esi
movq optarg(%rip), %rdi
call __isoc23_strtol@PLT
movl %eax, (%r14)
jmp .L4
.L13:
call abort@PLT
.L14:
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z10parse_argviPPcS0_PiS1_, .-_Z10parse_argviPPcS0_PiS1_
.globl _Z39__device_stub__Z12jacobiKernelPfS_iS_S_PfS_iS_S_
.type _Z39__device_stub__Z12jacobiKernelPfS_iS_S_PfS_iS_S_, @function
_Z39__device_stub__Z12jacobiKernelPfS_iS_S_PfS_iS_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movl %edx, 28(%rsp)
movq %rcx, 16(%rsp)
movq %r8, 8(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 28(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L20
.L16:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L21
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 184
pushq 56(%rsp)
.cfi_def_cfa_offset 192
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z12jacobiKernelPfS_iS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L16
.L21:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z39__device_stub__Z12jacobiKernelPfS_iS_S_PfS_iS_S_, .-_Z39__device_stub__Z12jacobiKernelPfS_iS_S_PfS_iS_S_
.globl _Z12jacobiKernelPfS_iS_S_
.type _Z12jacobiKernelPfS_iS_S_, @function
_Z12jacobiKernelPfS_iS_S_:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z39__device_stub__Z12jacobiKernelPfS_iS_S_PfS_iS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z12jacobiKernelPfS_iS_S_, .-_Z12jacobiKernelPfS_iS_S_
.section .rodata.str1.1
.LC1:
.string "r"
.LC2:
.string "%d"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC3:
.string "N = %d, iter = %d, blocksize = %d\n"
.section .rodata.str1.1
.LC4:
.string "%f"
.LC5:
.string "Running Jacobi method...\n"
.LC6:
.string "\nResult after %d iterations:\n"
.LC7:
.string "x[%d] = %f\n"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $104, %rsp
.cfi_def_cfa_offset 160
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
movl $10000, 48(%rsp)
movl $512, 52(%rsp)
movq $0, 56(%rsp)
leaq 48(%rsp), %rcx
leaq 56(%rsp), %rdx
leaq 52(%rsp), %r8
call _Z10parse_argviPPcS0_PiS1_
leaq .LC1(%rip), %rsi
movq 56(%rsp), %rdi
call fopen@PLT
testq %rax, %rax
je .L42
movq %rax, %r12
leaq 44(%rsp), %rdx
leaq .LC2(%rip), %rsi
movq %rax, %rdi
movl $0, %eax
call __isoc23_fscanf@PLT
movl 52(%rsp), %r8d
movl %r8d, 20(%rsp)
movl 48(%rsp), %eax
movl %eax, 16(%rsp)
movl %eax, %ecx
movl 44(%rsp), %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 44(%rsp), %ebx
movl %ebx, %edi
imull %ebx, %edi
movslq %edi, %rdi
movl $4, %esi
call calloc@PLT
movq %rax, %r14
movslq %ebx, %rbp
movl $4, %esi
movq %rbp, %rdi
call calloc@PLT
movq %rax, %r15
movq %rax, 24(%rsp)
movl $4, %esi
movq %rbp, %rdi
call calloc@PLT
movq %rax, 8(%rsp)
testl %ebx, %ebx
jle .L26
movl $0, %ebp
leaq .LC4(%rip), %r13
jmp .L27
.L42:
movl $1, %edi
call exit@PLT
.L28:
imull %ebp, %eax
addl %ebx, %eax
cltq
leaq (%r14,%rax,4), %rdx
movq %r13, %rsi
movq %r12, %rdi
movl $0, %eax
call __isoc23_fscanf@PLT
addl $1, %ebx
movl 44(%rsp), %eax
cmpl %ebx, %eax
jg .L28
.L29:
movq %r15, %rdx
movq %r13, %rsi
movq %r12, %rdi
movl $0, %eax
call __isoc23_fscanf@PLT
addl $1, %ebp
movl 44(%rsp), %ebx
addq $4, %r15
cmpl %ebp, %ebx
jle .L26
.L27:
movl 44(%rsp), %eax
movl $0, %ebx
testl %eax, %eax
jg .L28
jmp .L29
.L26:
movslq %ebx, %rbx
imulq %rbx, %rbx
leaq 0(,%rbx,4), %rdx
movl $1, %ecx
movq %r14, %rsi
movl $0, %edi
call cudaMemcpy@PLT
movslq 44(%rsp), %rdx
salq $2, %rdx
movl $1, %ecx
movq 24(%rsp), %rsi
movl $0, %edi
call cudaMemcpy@PLT
movslq 44(%rsp), %rdx
salq $2, %rdx
movl $1, %ecx
movq 8(%rsp), %rsi
movl $0, %r12d
movq %r12, %rdi
call cudaMemcpy@PLT
movl 20(%rsp), %ecx
movl %ecx, %eax
addl 44(%rsp), %eax
subl $1, %eax
cltd
idivl %ecx
movl %eax, %ebp
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, 16(%rsp)
jle .L30
movl $0, %ebx
jmp .L32
.L31:
addl $1, %ebx
cmpl %ebx, 16(%rsp)
je .L30
.L32:
movl 20(%rsp), %eax
movl %eax, 76(%rsp)
movl $1, 80(%rsp)
movl %ebp, 64(%rsp)
movl $1, 68(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 76(%rsp), %rdx
movl $1, %ecx
movq 64(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L31
movl $0, %r8d
movl $0, %r12d
movq %r12, %rcx
movl 44(%rsp), %edx
movl $0, %edi
movq %rdi, %rsi
call _Z39__device_stub__Z12jacobiKernelPfS_iS_S_PfS_iS_S_
jmp .L31
.L30:
movslq 44(%rsp), %rdx
salq $2, %rdx
movl $2, %ecx
movl $0, %r12d
movq %r12, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $0, %edi
call cudaFree@PLT
movl $0, %edi
call cudaFree@PLT
movq %r12, %rdi
call cudaFree@PLT
movl $0, %edi
call cudaFree@PLT
movq %r14, %rdi
call free@PLT
movq 24(%rsp), %rdi
call free@PLT
movl 16(%rsp), %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, 44(%rsp)
jle .L33
movl $0, %ebx
leaq .LC7(%rip), %rbp
.L34:
movq 8(%rsp), %rax
pxor %xmm0, %xmm0
cvtss2sd (%rax,%rbx,4), %xmm0
movl %ebx, %edx
movq %rbp, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $1, %rbx
cmpl %ebx, 44(%rsp)
jg .L34
.L33:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L43
movl $0, %eax
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L43:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.1
.LC8:
.string "_Z12jacobiKernelPfS_iS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z12jacobiKernelPfS_iS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.str1.1
.LC9:
.string "file"
.LC10:
.string "iterations"
.LC11:
.string "blockSize"
.section .data.rel.local,"aw"
.align 32
.type _ZZ10parse_argviPPcS0_PiS1_E12long_options, @object
.size _ZZ10parse_argviPPcS0_PiS1_E12long_options, 128
_ZZ10parse_argviPPcS0_PiS1_E12long_options:
.quad .LC9
.long 1
.zero 4
.quad 0
.long 102
.zero 4
.quad .LC10
.long 2
.zero 4
.quad 0
.long 105
.zero 4
.quad .LC11
.long 2
.zero 4
.quad 0
.long 98
.zero 4
.quad 0
.long 0
.zero 4
.quad 0
.long 0
.zero 4
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <getopt.h>
#include <assert.h>
__global__ void jacobiKernel(float* A, float* b, int N, float* x_now, float* x_next)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N)
{
float sum = 0.0;
int idx_Ai = N * idx;
for (int j = 0; j < N; ++j)
if (idx != j)
sum += A[idx_Ai + j] * x_now[j];
x_next[idx] = (b[idx] - sum) / A[idx_Ai + idx];
}
}
void parse_argv(int argc, char *argv[], char **fname, int *iter, int *blockSize)
{
static struct option long_options[] =
{
{"file", required_argument, NULL, 'f'},
{"iterations", optional_argument, NULL, 'i'},
{"blockSize", optional_argument, NULL, 'b'},
{NULL, 0, NULL, 0}
};
int ch = 0;
while ((ch = getopt_long(argc, argv, "f:i:b:", long_options, NULL)) != -1) {
switch (ch) {
case 'f' : *fname = optarg;
break;
case 'i' : *iter = atoi(optarg);
break;
case 'b' : *blockSize = atoi(optarg);
break;
default:
abort();
}
}
}
int main(int argc, char *argv[])
{
int N, i, iter = 10000, blockSize = 512;
char *fname = NULL;
parse_argv(argc, argv, &fname, &iter, &blockSize);
FILE *file = fopen(fname, "r");
if (file == NULL)
exit(EXIT_FAILURE);
fscanf(file, "%d", &N);
printf("N = %d, iter = %d, blocksize = %d\n", N, iter, blockSize);
float *A = (float *)calloc(N * N, sizeof(float));
float *b = (float *)calloc(N, sizeof(float));
float *x = (float *)calloc(N, sizeof(float));
assert(A != NULL);
assert(b != NULL);
assert(x != NULL);
for (int i = 0; i < N; ++i) {
for (int j = 0; j < N; ++j) {
fscanf(file, "%f", &A[N * i + j]);
}
fscanf(file, "%f", &b[i]);
}
float *x0_d, *x1_d, *A_d, *b_d;
assert(cudaSuccess == cudaMalloc((void **) &A_d, N * N * sizeof(float)));
assert(cudaSuccess == cudaMalloc((void **) &b_d, N * sizeof(float)));
assert(cudaSuccess == cudaMalloc((void **) &x0_d, N * sizeof(float)));
assert(cudaSuccess == cudaMalloc((void **) &x1_d, N * sizeof(float)));
cudaMemcpy(A_d, A, sizeof(float) * N * N, cudaMemcpyHostToDevice);
cudaMemcpy(b_d, b, sizeof(float) * N, cudaMemcpyHostToDevice);
cudaMemcpy(x0_d, x, sizeof(float) * N, cudaMemcpyHostToDevice);
int nBlocks = (N + blockSize - 1) / blockSize;
printf("Running Jacobi method...\n");
for (i = 0; i < iter; ++i)
{
float *xnext = (i % 2 ? x0_d : x1_d);
float *xnow = (i % 2 ? x1_d : x0_d);
jacobiKernel <<< nBlocks, blockSize >>> (A_d, b_d, N, xnow, xnext);
}
cudaMemcpy(x, (iter % 2 ? x1_d : x0_d), sizeof(float) * N, cudaMemcpyDeviceToHost);
cudaFree(A_d); cudaFree(b_d); cudaFree(x0_d); cudaFree(x1_d);
free(A); free(b);
printf("\nResult after %d iterations:\n", iter);
for (i = 0; i < N; i++)
printf("x[%d] = %f\n", i, x[i]);
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <getopt.h>
#include <assert.h>
__global__ void jacobiKernel(float* A, float* b, int N, float* x_now, float* x_next)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N)
{
float sum = 0.0;
int idx_Ai = N * idx;
for (int j = 0; j < N; ++j)
if (idx != j)
sum += A[idx_Ai + j] * x_now[j];
x_next[idx] = (b[idx] - sum) / A[idx_Ai + idx];
}
}
void parse_argv(int argc, char *argv[], char **fname, int *iter, int *blockSize)
{
static struct option long_options[] =
{
{"file", required_argument, NULL, 'f'},
{"iterations", optional_argument, NULL, 'i'},
{"blockSize", optional_argument, NULL, 'b'},
{NULL, 0, NULL, 0}
};
int ch = 0;
while ((ch = getopt_long(argc, argv, "f:i:b:", long_options, NULL)) != -1) {
switch (ch) {
case 'f' : *fname = optarg;
break;
case 'i' : *iter = atoi(optarg);
break;
case 'b' : *blockSize = atoi(optarg);
break;
default:
abort();
}
}
}
int main(int argc, char *argv[])
{
int N, i, iter = 10000, blockSize = 512;
char *fname = NULL;
parse_argv(argc, argv, &fname, &iter, &blockSize);
FILE *file = fopen(fname, "r");
if (file == NULL)
exit(EXIT_FAILURE);
fscanf(file, "%d", &N);
printf("N = %d, iter = %d, blocksize = %d\n", N, iter, blockSize);
float *A = (float *)calloc(N * N, sizeof(float));
float *b = (float *)calloc(N, sizeof(float));
float *x = (float *)calloc(N, sizeof(float));
assert(A != NULL);
assert(b != NULL);
assert(x != NULL);
for (int i = 0; i < N; ++i) {
for (int j = 0; j < N; ++j) {
fscanf(file, "%f", &A[N * i + j]);
}
fscanf(file, "%f", &b[i]);
}
float *x0_d, *x1_d, *A_d, *b_d;
assert(hipSuccess == hipMalloc((void **) &A_d, N * N * sizeof(float)));
assert(hipSuccess == hipMalloc((void **) &b_d, N * sizeof(float)));
assert(hipSuccess == hipMalloc((void **) &x0_d, N * sizeof(float)));
assert(hipSuccess == hipMalloc((void **) &x1_d, N * sizeof(float)));
hipMemcpy(A_d, A, sizeof(float) * N * N, hipMemcpyHostToDevice);
hipMemcpy(b_d, b, sizeof(float) * N, hipMemcpyHostToDevice);
hipMemcpy(x0_d, x, sizeof(float) * N, hipMemcpyHostToDevice);
int nBlocks = (N + blockSize - 1) / blockSize;
printf("Running Jacobi method...\n");
for (i = 0; i < iter; ++i)
{
float *xnext = (i % 2 ? x0_d : x1_d);
float *xnow = (i % 2 ? x1_d : x0_d);
jacobiKernel <<< nBlocks, blockSize >>> (A_d, b_d, N, xnow, xnext);
}
hipMemcpy(x, (iter % 2 ? x1_d : x0_d), sizeof(float) * N, hipMemcpyDeviceToHost);
hipFree(A_d); hipFree(b_d); hipFree(x0_d); hipFree(x1_d);
free(A); free(b);
printf("\nResult after %d iterations:\n", iter);
for (i = 0; i < N; i++)
printf("x[%d] = %f\n", i, x[i]);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <getopt.h>
#include <assert.h>
__global__ void jacobiKernel(float* A, float* b, int N, float* x_now, float* x_next)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N)
{
float sum = 0.0;
int idx_Ai = N * idx;
for (int j = 0; j < N; ++j)
if (idx != j)
sum += A[idx_Ai + j] * x_now[j];
x_next[idx] = (b[idx] - sum) / A[idx_Ai + idx];
}
}
void parse_argv(int argc, char *argv[], char **fname, int *iter, int *blockSize)
{
static struct option long_options[] =
{
{"file", required_argument, NULL, 'f'},
{"iterations", optional_argument, NULL, 'i'},
{"blockSize", optional_argument, NULL, 'b'},
{NULL, 0, NULL, 0}
};
int ch = 0;
while ((ch = getopt_long(argc, argv, "f:i:b:", long_options, NULL)) != -1) {
switch (ch) {
case 'f' : *fname = optarg;
break;
case 'i' : *iter = atoi(optarg);
break;
case 'b' : *blockSize = atoi(optarg);
break;
default:
abort();
}
}
}
int main(int argc, char *argv[])
{
int N, i, iter = 10000, blockSize = 512;
char *fname = NULL;
parse_argv(argc, argv, &fname, &iter, &blockSize);
FILE *file = fopen(fname, "r");
if (file == NULL)
exit(EXIT_FAILURE);
fscanf(file, "%d", &N);
printf("N = %d, iter = %d, blocksize = %d\n", N, iter, blockSize);
float *A = (float *)calloc(N * N, sizeof(float));
float *b = (float *)calloc(N, sizeof(float));
float *x = (float *)calloc(N, sizeof(float));
assert(A != NULL);
assert(b != NULL);
assert(x != NULL);
for (int i = 0; i < N; ++i) {
for (int j = 0; j < N; ++j) {
fscanf(file, "%f", &A[N * i + j]);
}
fscanf(file, "%f", &b[i]);
}
float *x0_d, *x1_d, *A_d, *b_d;
assert(hipSuccess == hipMalloc((void **) &A_d, N * N * sizeof(float)));
assert(hipSuccess == hipMalloc((void **) &b_d, N * sizeof(float)));
assert(hipSuccess == hipMalloc((void **) &x0_d, N * sizeof(float)));
assert(hipSuccess == hipMalloc((void **) &x1_d, N * sizeof(float)));
hipMemcpy(A_d, A, sizeof(float) * N * N, hipMemcpyHostToDevice);
hipMemcpy(b_d, b, sizeof(float) * N, hipMemcpyHostToDevice);
hipMemcpy(x0_d, x, sizeof(float) * N, hipMemcpyHostToDevice);
int nBlocks = (N + blockSize - 1) / blockSize;
printf("Running Jacobi method...\n");
for (i = 0; i < iter; ++i)
{
float *xnext = (i % 2 ? x0_d : x1_d);
float *xnow = (i % 2 ? x1_d : x0_d);
jacobiKernel <<< nBlocks, blockSize >>> (A_d, b_d, N, xnow, xnext);
}
hipMemcpy(x, (iter % 2 ? x1_d : x0_d), sizeof(float) * N, hipMemcpyDeviceToHost);
hipFree(A_d); hipFree(b_d); hipFree(x0_d); hipFree(x1_d);
free(A); free(b);
printf("\nResult after %d iterations:\n", iter);
for (i = 0; i < N; i++)
printf("x[%d] = %f\n", i, x[i]);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z12jacobiKernelPfS_iS_S_
.globl _Z12jacobiKernelPfS_iS_S_
.p2align 8
.type _Z12jacobiKernelPfS_iS_S_,@function
_Z12jacobiKernelPfS_iS_S_:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x34
s_load_b32 s6, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s6, v1
s_cbranch_execz .LBB0_8
s_load_b64 s[2:3], s[0:1], 0x0
v_mul_lo_u32 v4, v1, s6
s_cmp_lt_i32 s6, 1
s_cbranch_scc1 .LBB0_6
s_load_b64 s[4:5], s[0:1], 0x18
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v5, v1
s_delay_alu instid0(VALU_DEP_2)
v_mov_b32_e32 v2, v4
s_branch .LBB0_4
.p2align 6
.LBB0_3:
s_or_b32 exec_lo, exec_lo, s7
s_add_i32 s6, s6, -1
v_add_nc_u32_e32 v5, -1, v5
v_add_nc_u32_e32 v2, 1, v2
s_waitcnt lgkmcnt(0)
s_add_u32 s4, s4, 4
s_addc_u32 s5, s5, 0
s_cmp_eq_u32 s6, 0
s_cbranch_scc1 .LBB0_7
.LBB0_4:
s_mov_b32 s7, exec_lo
s_delay_alu instid0(VALU_DEP_2)
v_cmpx_ne_u32_e32 0, v5
s_cbranch_execz .LBB0_3
v_ashrrev_i32_e32 v3, 31, v2
s_waitcnt lgkmcnt(0)
s_load_b32 s8, s[4:5], 0x0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[6:7], 2, v[2:3]
v_add_co_u32 v6, vcc_lo, s2, v6
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v7, vcc_lo, s3, v7, vcc_lo
global_load_b32 v3, v[6:7], off
s_waitcnt vmcnt(0) lgkmcnt(0)
v_fmac_f32_e32 v0, s8, v3
s_branch .LBB0_3
.LBB0_6:
v_mov_b32_e32 v0, 0
.LBB0_7:
s_load_b64 s[4:5], s[0:1], 0x8
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v3, v4, v1
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x20
v_ashrrev_i32_e32 v4, 31, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[1:2], 2, v[1:2]
v_lshlrev_b64 v[3:4], 2, v[3:4]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v5, vcc_lo, s4, v1
v_add_co_ci_u32_e32 v6, vcc_lo, s5, v2, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v3, vcc_lo, s2, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v4, vcc_lo
global_load_b32 v5, v[5:6], off
global_load_b32 v3, v[3:4], off
s_waitcnt vmcnt(1)
v_sub_f32_e32 v0, v5, v0
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_div_scale_f32 v4, null, v3, v3, v0
v_div_scale_f32 v7, vcc_lo, v0, v3, v0
v_rcp_f32_e32 v5, v4
s_waitcnt_depctr 0xfff
v_fma_f32 v6, -v4, v5, 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v5, v6, v5
v_mul_f32_e32 v6, v7, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v8, -v4, v6, v7
v_fmac_f32_e32 v6, v8, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v4, -v4, v6, v7
v_div_fmas_f32 v4, v4, v5, v6
s_delay_alu instid0(VALU_DEP_1)
v_div_fixup_f32 v3, v4, v3, v0
v_add_co_u32 v0, vcc_lo, s0, v1
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v2, vcc_lo
global_store_b32 v[0:1], v3, off
.LBB0_8:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12jacobiKernelPfS_iS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z12jacobiKernelPfS_iS_S_, .Lfunc_end0-_Z12jacobiKernelPfS_iS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12jacobiKernelPfS_iS_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z12jacobiKernelPfS_iS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <getopt.h>
#include <assert.h>
__global__ void jacobiKernel(float* A, float* b, int N, float* x_now, float* x_next)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N)
{
float sum = 0.0;
int idx_Ai = N * idx;
for (int j = 0; j < N; ++j)
if (idx != j)
sum += A[idx_Ai + j] * x_now[j];
x_next[idx] = (b[idx] - sum) / A[idx_Ai + idx];
}
}
void parse_argv(int argc, char *argv[], char **fname, int *iter, int *blockSize)
{
static struct option long_options[] =
{
{"file", required_argument, NULL, 'f'},
{"iterations", optional_argument, NULL, 'i'},
{"blockSize", optional_argument, NULL, 'b'},
{NULL, 0, NULL, 0}
};
int ch = 0;
while ((ch = getopt_long(argc, argv, "f:i:b:", long_options, NULL)) != -1) {
switch (ch) {
case 'f' : *fname = optarg;
break;
case 'i' : *iter = atoi(optarg);
break;
case 'b' : *blockSize = atoi(optarg);
break;
default:
abort();
}
}
}
/**
 * Host driver for the GPU Jacobi iterative solver.
 *
 * Reads a dense N x N system from the file given with -f/--file (format:
 * N, then for each row: N matrix entries followed by the row's b value),
 * runs `iter` Jacobi sweeps on the device using a ping-pong pair of
 * solution vectors, and prints the final x.
 *
 * @return 0 on success; exits with EXIT_FAILURE on argument, I/O, or
 *         allocation errors.
 */
int main(int argc, char *argv[])
{
    int N, i, iter = 10000, blockSize = 512;
    char *fname = NULL;
    parse_argv(argc, argv, &fname, &iter, &blockSize);
    // Guard against a missing -f option: fopen(NULL, ...) is undefined.
    if (fname == NULL || blockSize <= 0 || iter < 0) {
        fprintf(stderr, "Usage: %s -f <file> [-i iterations] [-b blockSize]\n", argv[0]);
        exit(EXIT_FAILURE);
    }
    FILE *file = fopen(fname, "r");
    if (file == NULL)
        exit(EXIT_FAILURE);
    if (fscanf(file, "%d", &N) != 1 || N <= 0) {
        fclose(file);
        exit(EXIT_FAILURE);
    }
    printf("N = %d, iter = %d, blocksize = %d\n", N, iter, blockSize);
    // size_t arithmetic avoids int overflow of N*N for large systems.
    float *A = (float *)calloc((size_t)N * N, sizeof(float));
    float *b = (float *)calloc(N, sizeof(float));
    float *x = (float *)calloc(N, sizeof(float));
    assert(A != NULL);
    assert(b != NULL);
    assert(x != NULL);
    for (int r = 0; r < N; ++r) {
        for (int c = 0; c < N; ++c) {
            // Check every conversion: a short file previously went unnoticed.
            if (fscanf(file, "%f", &A[(size_t)N * r + c]) != 1) {
                fclose(file);
                exit(EXIT_FAILURE);
            }
        }
        if (fscanf(file, "%f", &b[r]) != 1) {
            fclose(file);
            exit(EXIT_FAILURE);
        }
    }
    fclose(file);  // was never closed in the original
    float *x0_d, *x1_d, *A_d, *b_d;
    // BUG FIX: the hip* allocations were wrapped in assert(); compiling with
    // -DNDEBUG elides the allocation itself, leaving the pointers garbage.
    // Check the return codes explicitly instead.
    if (hipMalloc((void **) &A_d, (size_t)N * N * sizeof(float)) != hipSuccess ||
        hipMalloc((void **) &b_d, (size_t)N * sizeof(float)) != hipSuccess ||
        hipMalloc((void **) &x0_d, (size_t)N * sizeof(float)) != hipSuccess ||
        hipMalloc((void **) &x1_d, (size_t)N * sizeof(float)) != hipSuccess) {
        fprintf(stderr, "Device allocation failed\n");
        exit(EXIT_FAILURE);
    }
    hipMemcpy(A_d, A, sizeof(float) * N * N, hipMemcpyHostToDevice);
    hipMemcpy(b_d, b, sizeof(float) * N, hipMemcpyHostToDevice);
    hipMemcpy(x0_d, x, sizeof(float) * N, hipMemcpyHostToDevice);
    int nBlocks = (N + blockSize - 1) / blockSize;  // ceil-div launch grid
    printf("Running Jacobi method...\n");
    for (i = 0; i < iter; ++i)
    {
        // Ping-pong buffers: even iterations read x0_d / write x1_d,
        // odd iterations the reverse.
        float *xnext = (i % 2 ? x0_d : x1_d);
        float *xnow = (i % 2 ? x1_d : x0_d);
        jacobiKernel <<< nBlocks, blockSize >>> (A_d, b_d, N, xnow, xnext);
    }
    // The last sweep (i = iter-1) wrote x1_d when iter is odd, x0_d otherwise.
    hipMemcpy(x, (iter % 2 ? x1_d : x0_d), sizeof(float) * N, hipMemcpyDeviceToHost);
    hipFree(A_d); hipFree(b_d); hipFree(x0_d); hipFree(x1_d);
    free(A); free(b);
    printf("\nResult after %d iterations:\n", iter);
    for (i = 0; i < N; i++)
        printf("x[%d] = %f\n", i, x[i]);
    free(x);  // was leaked in the original
    return 0;
}
.file "jacobi.hip"
.globl _Z27__device_stub__jacobiKernelPfS_iS_S_ # -- Begin function _Z27__device_stub__jacobiKernelPfS_iS_S_
.p2align 4, 0x90
.type _Z27__device_stub__jacobiKernelPfS_iS_S_,@function
_Z27__device_stub__jacobiKernelPfS_iS_S_: # @_Z27__device_stub__jacobiKernelPfS_iS_S_
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movl %edx, 12(%rsp)
movq %rcx, 72(%rsp)
movq %r8, 64(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 72(%rsp), %rax
movq %rax, 120(%rsp)
leaq 64(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z12jacobiKernelPfS_iS_S_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end0:
.size _Z27__device_stub__jacobiKernelPfS_iS_S_, .Lfunc_end0-_Z27__device_stub__jacobiKernelPfS_iS_S_
.cfi_endproc
# -- End function
.globl _Z10parse_argviPPcS0_PiS1_ # -- Begin function _Z10parse_argviPPcS0_PiS1_
.p2align 4, 0x90
.type _Z10parse_argviPPcS0_PiS1_,@function
_Z10parse_argviPPcS0_PiS1_: # @_Z10parse_argviPPcS0_PiS1_
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %r8, %rbx
movq %rcx, %r14
movq %rdx, %r15
movq %rsi, %r12
movl %edi, %ebp
jmp .LBB1_1
.p2align 4, 0x90
.LBB1_8: # in Loop: Header=BB1_1 Depth=1
movq optarg(%rip), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movl %eax, (%r14)
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl $.L.str.3, %edx
movl $_ZZ10parse_argviPPcS0_PiS1_E12long_options, %ecx
movl %ebp, %edi
movq %r12, %rsi
xorl %r8d, %r8d
callq getopt_long
cmpl $101, %eax
jle .LBB1_2
# %bb.5: # in Loop: Header=BB1_1 Depth=1
cmpl $105, %eax
je .LBB1_8
# %bb.6: # in Loop: Header=BB1_1 Depth=1
cmpl $102, %eax
jne .LBB1_4
# %bb.7: # in Loop: Header=BB1_1 Depth=1
movq optarg(%rip), %rax
movq %rax, (%r15)
jmp .LBB1_1
.p2align 4, 0x90
.LBB1_2: # in Loop: Header=BB1_1 Depth=1
cmpl $98, %eax
jne .LBB1_3
# %bb.9: # in Loop: Header=BB1_1 Depth=1
movq optarg(%rip), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movl %eax, (%rbx)
jmp .LBB1_1
.LBB1_3:
cmpl $-1, %eax
jne .LBB1_4
# %bb.10:
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_4:
.cfi_def_cfa_offset 48
callq abort
.Lfunc_end1:
.size _Z10parse_argviPPcS0_PiS1_, .Lfunc_end1-_Z10parse_argviPPcS0_PiS1_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $152, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %r14
movl %edi, %ebp
movl $10000, %ebx # imm = 0x2710
movl $512, %r15d # imm = 0x200
xorl %r12d, %r12d
jmp .LBB2_1
.p2align 4, 0x90
.LBB2_6: # in Loop: Header=BB2_1 Depth=1
movq optarg(%rip), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %rbx
.LBB2_1: # =>This Inner Loop Header: Depth=1
movl $.L.str.3, %edx
movl $_ZZ10parse_argviPPcS0_PiS1_E12long_options, %ecx
movl %ebp, %edi
movq %r14, %rsi
xorl %r8d, %r8d
callq getopt_long
cmpl $105, %eax
je .LBB2_6
# %bb.2: # in Loop: Header=BB2_1 Depth=1
cmpl $-1, %eax
je .LBB2_9
# %bb.3: # in Loop: Header=BB2_1 Depth=1
cmpl $98, %eax
je .LBB2_7
# %bb.4: # in Loop: Header=BB2_1 Depth=1
cmpl $102, %eax
jne .LBB2_8
# %bb.5: # in Loop: Header=BB2_1 Depth=1
movq optarg(%rip), %r12
jmp .LBB2_1
.LBB2_7: # in Loop: Header=BB2_1 Depth=1
movq optarg(%rip), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r15
jmp .LBB2_1
.LBB2_9: # %_Z10parse_argviPPcS0_PiS1_.exit
movl $.L.str.4, %esi
movq %r12, %rdi
callq fopen
testq %rax, %rax
je .LBB2_26
# %bb.10:
movq %rax, %rbp
leaq 8(%rsp), %rdx
movl $.L.str.5, %esi
movq %rax, %rdi
xorl %eax, %eax
callq __isoc23_fscanf
movl 8(%rsp), %esi
movl $.L.str.6, %edi
movl %ebx, %edx
movl %r15d, %ecx
xorl %eax, %eax
callq printf
movslq 8(%rsp), %r14
movl %r14d, %edi
imull %edi, %edi
movl $4, %esi
callq calloc
movq %rax, %r12
movl $4, %esi
movq %r14, %rdi
callq calloc
movq %rax, %r13
movl $4, %esi
movq %r14, %rdi
callq calloc
movq %rax, %r14
movl 8(%rsp), %eax
testl %eax, %eax
movq %r14, 16(%rsp) # 8-byte Spill
jle .LBB2_17
# %bb.11: # %.preheader.preheader
movq %r15, 24(%rsp) # 8-byte Spill
xorl %r15d, %r15d
jmp .LBB2_12
.p2align 4, 0x90
.LBB2_15: # %._crit_edge
# in Loop: Header=BB2_12 Depth=1
leaq (,%r15,4), %rdx
addq %r13, %rdx
movl $.L.str.7, %esi
movq %rbp, %rdi
xorl %eax, %eax
callq __isoc23_fscanf
incq %r15
movslq 8(%rsp), %rax
cmpq %rax, %r15
jge .LBB2_16
.LBB2_12: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB2_14 Depth 2
movl 8(%rsp), %eax
testl %eax, %eax
jle .LBB2_15
# %bb.13: # %.lr.ph.preheader
# in Loop: Header=BB2_12 Depth=1
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB2_14: # %.lr.ph
# Parent Loop BB2_12 Depth=1
# => This Inner Loop Header: Depth=2
imull %r15d, %eax
cltq
addq %r14, %rax
leaq (%r12,%rax,4), %rdx
movl $.L.str.7, %esi
movq %rbp, %rdi
xorl %eax, %eax
callq __isoc23_fscanf
movl 8(%rsp), %eax
incq %r14
cmpl %eax, %r14d
jl .LBB2_14
jmp .LBB2_15
.LBB2_16:
movq 16(%rsp), %r14 # 8-byte Reload
movq 24(%rsp), %r15 # 8-byte Reload
.LBB2_17: # %._crit_edge75
movslq %eax, %rdx
imulq %rdx, %rdx
shlq $2, %rdx
movq %r12, %rsi
movl $1, %ecx
callq hipMemcpy
movslq 8(%rsp), %rdx
shlq $2, %rdx
movq %r13, %rsi
movl $1, %ecx
callq hipMemcpy
movslq 8(%rsp), %rdx
shlq $2, %rdx
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movl 8(%rsp), %r14d
movl $.Lstr, %edi
callq puts@PLT
testl %ebx, %ebx
jle .LBB2_22
# %bb.18: # %.lr.ph78
leal (%r15,%r14), %eax
decl %eax
cltd
idivl %r15d
movl %eax, %ebp
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %rbp
movl %r15d, %r15d
orq %rax, %r15
movl %ebx, %r14d
jmp .LBB2_19
.p2align 4, 0x90
.LBB2_21: # in Loop: Header=BB2_19 Depth=1
decl %r14d
je .LBB2_22
.LBB2_19: # =>This Inner Loop Header: Depth=1
movq %rbp, %rdi
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_21
# %bb.20: # in Loop: Header=BB2_19 Depth=1
movl 8(%rsp), %eax
movl %eax, 12(%rsp)
leaq 144(%rsp), %rax
movq %rax, 80(%rsp)
leaq 136(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 128(%rsp), %rax
movq %rax, 104(%rsp)
leaq 120(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
movl $_Z12jacobiKernelPfS_iS_S_, %edi
leaq 80(%rsp), %r9
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB2_21
.LBB2_22: # %._crit_edge79
movslq 8(%rsp), %rdx
shlq $2, %rdx
movq 16(%rsp), %r14 # 8-byte Reload
movq %r14, %rdi
movl $2, %ecx
callq hipMemcpy
callq hipFree
callq hipFree
callq hipFree
callq hipFree
movq %r12, %rdi
callq free
movq %r13, %rdi
callq free
movl $.L.str.9, %edi
movl %ebx, %esi
xorl %eax, %eax
callq printf
cmpl $0, 8(%rsp)
jle .LBB2_25
# %bb.23: # %.lr.ph82.preheader
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB2_24: # %.lr.ph82
# =>This Inner Loop Header: Depth=1
movss (%r14,%rbx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.10, %edi
movl %ebx, %esi
movb $1, %al
callq printf
incq %rbx
movslq 8(%rsp), %rax
cmpq %rax, %rbx
jl .LBB2_24
.LBB2_25: # %._crit_edge83
xorl %eax, %eax
addq $152, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB2_8:
.cfi_def_cfa_offset 208
callq abort
.LBB2_26:
movl $1, %edi
callq exit
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12jacobiKernelPfS_iS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z12jacobiKernelPfS_iS_S_,@object # @_Z12jacobiKernelPfS_iS_S_
.section .rodata,"a",@progbits
.globl _Z12jacobiKernelPfS_iS_S_
.p2align 3, 0x0
_Z12jacobiKernelPfS_iS_S_:
.quad _Z27__device_stub__jacobiKernelPfS_iS_S_
.size _Z12jacobiKernelPfS_iS_S_, 8
.type _ZZ10parse_argviPPcS0_PiS1_E12long_options,@object # @_ZZ10parse_argviPPcS0_PiS1_E12long_options
.data
.p2align 4, 0x0
_ZZ10parse_argviPPcS0_PiS1_E12long_options:
.quad .L.str
.long 1 # 0x1
.zero 4
.quad 0
.long 102 # 0x66
.zero 4
.quad .L.str.1
.long 2 # 0x2
.zero 4
.quad 0
.long 105 # 0x69
.zero 4
.quad .L.str.2
.long 2 # 0x2
.zero 4
.quad 0
.long 98 # 0x62
.zero 4
.zero 32
.size _ZZ10parse_argviPPcS0_PiS1_E12long_options, 128
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "file"
.size .L.str, 5
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "iterations"
.size .L.str.1, 11
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "blockSize"
.size .L.str.2, 10
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "f:i:b:"
.size .L.str.3, 7
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "r"
.size .L.str.4, 2
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "%d"
.size .L.str.5, 3
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "N = %d, iter = %d, blocksize = %d\n"
.size .L.str.6, 35
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "%f"
.size .L.str.7, 3
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "\nResult after %d iterations:\n"
.size .L.str.9, 30
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "x[%d] = %f\n"
.size .L.str.10, 12
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z12jacobiKernelPfS_iS_S_"
.size .L__unnamed_1, 26
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Running Jacobi method..."
.size .Lstr, 25
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__jacobiKernelPfS_iS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12jacobiKernelPfS_iS_S_
.addrsig_sym _ZZ10parse_argviPPcS0_PiS1_E12long_options
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000877ce_00000000-6_jacobi.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "f:i:b:"
.text
.globl _Z10parse_argviPPcS0_PiS1_
.type _Z10parse_argviPPcS0_PiS1_, @function
_Z10parse_argviPPcS0_PiS1_:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movl %edi, %ebp
movq %rsi, %rbx
movq %rdx, %r15
movq %rcx, %r14
movq %r8, 8(%rsp)
leaq _ZZ10parse_argviPPcS0_PiS1_E12long_options(%rip), %r13
leaq .LC0(%rip), %r12
jmp .L4
.L15:
cmpl $98, %eax
jne .L13
movl $10, %edx
movl $0, %esi
movq optarg(%rip), %rdi
call __isoc23_strtol@PLT
movq 8(%rsp), %rcx
movl %eax, (%rcx)
jmp .L4
.L5:
movq optarg(%rip), %rax
movq %rax, (%r15)
.L4:
movl $0, %r8d
movq %r13, %rcx
movq %r12, %rdx
movq %rbx, %rsi
movl %ebp, %edi
call getopt_long@PLT
cmpl $-1, %eax
je .L14
cmpl $102, %eax
je .L5
cmpl $105, %eax
jne .L15
movl $10, %edx
movl $0, %esi
movq optarg(%rip), %rdi
call __isoc23_strtol@PLT
movl %eax, (%r14)
jmp .L4
.L13:
call abort@PLT
.L14:
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z10parse_argviPPcS0_PiS1_, .-_Z10parse_argviPPcS0_PiS1_
.globl _Z39__device_stub__Z12jacobiKernelPfS_iS_S_PfS_iS_S_
.type _Z39__device_stub__Z12jacobiKernelPfS_iS_S_PfS_iS_S_, @function
_Z39__device_stub__Z12jacobiKernelPfS_iS_S_PfS_iS_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movl %edx, 28(%rsp)
movq %rcx, 16(%rsp)
movq %r8, 8(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 28(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L20
.L16:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L21
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 184
pushq 56(%rsp)
.cfi_def_cfa_offset 192
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z12jacobiKernelPfS_iS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L16
.L21:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z39__device_stub__Z12jacobiKernelPfS_iS_S_PfS_iS_S_, .-_Z39__device_stub__Z12jacobiKernelPfS_iS_S_PfS_iS_S_
.globl _Z12jacobiKernelPfS_iS_S_
.type _Z12jacobiKernelPfS_iS_S_, @function
_Z12jacobiKernelPfS_iS_S_:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z39__device_stub__Z12jacobiKernelPfS_iS_S_PfS_iS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z12jacobiKernelPfS_iS_S_, .-_Z12jacobiKernelPfS_iS_S_
.section .rodata.str1.1
.LC1:
.string "r"
.LC2:
.string "%d"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC3:
.string "N = %d, iter = %d, blocksize = %d\n"
.section .rodata.str1.1
.LC4:
.string "%f"
.LC5:
.string "Running Jacobi method...\n"
.LC6:
.string "\nResult after %d iterations:\n"
.LC7:
.string "x[%d] = %f\n"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $104, %rsp
.cfi_def_cfa_offset 160
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
movl $10000, 48(%rsp)
movl $512, 52(%rsp)
movq $0, 56(%rsp)
leaq 48(%rsp), %rcx
leaq 56(%rsp), %rdx
leaq 52(%rsp), %r8
call _Z10parse_argviPPcS0_PiS1_
leaq .LC1(%rip), %rsi
movq 56(%rsp), %rdi
call fopen@PLT
testq %rax, %rax
je .L42
movq %rax, %r12
leaq 44(%rsp), %rdx
leaq .LC2(%rip), %rsi
movq %rax, %rdi
movl $0, %eax
call __isoc23_fscanf@PLT
movl 52(%rsp), %r8d
movl %r8d, 20(%rsp)
movl 48(%rsp), %eax
movl %eax, 16(%rsp)
movl %eax, %ecx
movl 44(%rsp), %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 44(%rsp), %ebx
movl %ebx, %edi
imull %ebx, %edi
movslq %edi, %rdi
movl $4, %esi
call calloc@PLT
movq %rax, %r14
movslq %ebx, %rbp
movl $4, %esi
movq %rbp, %rdi
call calloc@PLT
movq %rax, %r15
movq %rax, 24(%rsp)
movl $4, %esi
movq %rbp, %rdi
call calloc@PLT
movq %rax, 8(%rsp)
testl %ebx, %ebx
jle .L26
movl $0, %ebp
leaq .LC4(%rip), %r13
jmp .L27
.L42:
movl $1, %edi
call exit@PLT
.L28:
imull %ebp, %eax
addl %ebx, %eax
cltq
leaq (%r14,%rax,4), %rdx
movq %r13, %rsi
movq %r12, %rdi
movl $0, %eax
call __isoc23_fscanf@PLT
addl $1, %ebx
movl 44(%rsp), %eax
cmpl %ebx, %eax
jg .L28
.L29:
movq %r15, %rdx
movq %r13, %rsi
movq %r12, %rdi
movl $0, %eax
call __isoc23_fscanf@PLT
addl $1, %ebp
movl 44(%rsp), %ebx
addq $4, %r15
cmpl %ebp, %ebx
jle .L26
.L27:
movl 44(%rsp), %eax
movl $0, %ebx
testl %eax, %eax
jg .L28
jmp .L29
.L26:
movslq %ebx, %rbx
imulq %rbx, %rbx
leaq 0(,%rbx,4), %rdx
movl $1, %ecx
movq %r14, %rsi
movl $0, %edi
call cudaMemcpy@PLT
movslq 44(%rsp), %rdx
salq $2, %rdx
movl $1, %ecx
movq 24(%rsp), %rsi
movl $0, %edi
call cudaMemcpy@PLT
movslq 44(%rsp), %rdx
salq $2, %rdx
movl $1, %ecx
movq 8(%rsp), %rsi
movl $0, %r12d
movq %r12, %rdi
call cudaMemcpy@PLT
movl 20(%rsp), %ecx
movl %ecx, %eax
addl 44(%rsp), %eax
subl $1, %eax
cltd
idivl %ecx
movl %eax, %ebp
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, 16(%rsp)
jle .L30
movl $0, %ebx
jmp .L32
.L31:
addl $1, %ebx
cmpl %ebx, 16(%rsp)
je .L30
.L32:
movl 20(%rsp), %eax
movl %eax, 76(%rsp)
movl $1, 80(%rsp)
movl %ebp, 64(%rsp)
movl $1, 68(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 76(%rsp), %rdx
movl $1, %ecx
movq 64(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L31
movl $0, %r8d
movl $0, %r12d
movq %r12, %rcx
movl 44(%rsp), %edx
movl $0, %edi
movq %rdi, %rsi
call _Z39__device_stub__Z12jacobiKernelPfS_iS_S_PfS_iS_S_
jmp .L31
.L30:
movslq 44(%rsp), %rdx
salq $2, %rdx
movl $2, %ecx
movl $0, %r12d
movq %r12, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $0, %edi
call cudaFree@PLT
movl $0, %edi
call cudaFree@PLT
movq %r12, %rdi
call cudaFree@PLT
movl $0, %edi
call cudaFree@PLT
movq %r14, %rdi
call free@PLT
movq 24(%rsp), %rdi
call free@PLT
movl 16(%rsp), %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, 44(%rsp)
jle .L33
movl $0, %ebx
leaq .LC7(%rip), %rbp
.L34:
movq 8(%rsp), %rax
pxor %xmm0, %xmm0
cvtss2sd (%rax,%rbx,4), %xmm0
movl %ebx, %edx
movq %rbp, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $1, %rbx
cmpl %ebx, 44(%rsp)
jg .L34
.L33:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L43
movl $0, %eax
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L43:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.1
.LC8:
.string "_Z12jacobiKernelPfS_iS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z12jacobiKernelPfS_iS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.str1.1
.LC9:
.string "file"
.LC10:
.string "iterations"
.LC11:
.string "blockSize"
.section .data.rel.local,"aw"
.align 32
.type _ZZ10parse_argviPPcS0_PiS1_E12long_options, @object
.size _ZZ10parse_argviPPcS0_PiS1_E12long_options, 128
_ZZ10parse_argviPPcS0_PiS1_E12long_options:
.quad .LC9
.long 1
.zero 4
.quad 0
.long 102
.zero 4
.quad .LC10
.long 2
.zero 4
.quad 0
.long 105
.zero 4
.quad .LC11
.long 2
.zero 4
.quad 0
.long 98
.zero 4
.quad 0
.long 0
.zero 4
.quad 0
.long 0
.zero 4
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "jacobi.hip"
.globl _Z27__device_stub__jacobiKernelPfS_iS_S_ # -- Begin function _Z27__device_stub__jacobiKernelPfS_iS_S_
.p2align 4, 0x90
.type _Z27__device_stub__jacobiKernelPfS_iS_S_,@function
_Z27__device_stub__jacobiKernelPfS_iS_S_: # @_Z27__device_stub__jacobiKernelPfS_iS_S_
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movl %edx, 12(%rsp)
movq %rcx, 72(%rsp)
movq %r8, 64(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 72(%rsp), %rax
movq %rax, 120(%rsp)
leaq 64(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z12jacobiKernelPfS_iS_S_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end0:
.size _Z27__device_stub__jacobiKernelPfS_iS_S_, .Lfunc_end0-_Z27__device_stub__jacobiKernelPfS_iS_S_
.cfi_endproc
# -- End function
.globl _Z10parse_argviPPcS0_PiS1_ # -- Begin function _Z10parse_argviPPcS0_PiS1_
.p2align 4, 0x90
.type _Z10parse_argviPPcS0_PiS1_,@function
_Z10parse_argviPPcS0_PiS1_: # @_Z10parse_argviPPcS0_PiS1_
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %r8, %rbx
movq %rcx, %r14
movq %rdx, %r15
movq %rsi, %r12
movl %edi, %ebp
jmp .LBB1_1
.p2align 4, 0x90
.LBB1_8: # in Loop: Header=BB1_1 Depth=1
movq optarg(%rip), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movl %eax, (%r14)
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl $.L.str.3, %edx
movl $_ZZ10parse_argviPPcS0_PiS1_E12long_options, %ecx
movl %ebp, %edi
movq %r12, %rsi
xorl %r8d, %r8d
callq getopt_long
cmpl $101, %eax
jle .LBB1_2
# %bb.5: # in Loop: Header=BB1_1 Depth=1
cmpl $105, %eax
je .LBB1_8
# %bb.6: # in Loop: Header=BB1_1 Depth=1
cmpl $102, %eax
jne .LBB1_4
# %bb.7: # in Loop: Header=BB1_1 Depth=1
movq optarg(%rip), %rax
movq %rax, (%r15)
jmp .LBB1_1
.p2align 4, 0x90
.LBB1_2: # in Loop: Header=BB1_1 Depth=1
cmpl $98, %eax
jne .LBB1_3
# %bb.9: # in Loop: Header=BB1_1 Depth=1
movq optarg(%rip), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movl %eax, (%rbx)
jmp .LBB1_1
.LBB1_3:
cmpl $-1, %eax
jne .LBB1_4
# %bb.10:
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_4:
.cfi_def_cfa_offset 48
callq abort
.Lfunc_end1:
.size _Z10parse_argviPPcS0_PiS1_, .Lfunc_end1-_Z10parse_argviPPcS0_PiS1_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $152, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %r14
movl %edi, %ebp
movl $10000, %ebx # imm = 0x2710
movl $512, %r15d # imm = 0x200
xorl %r12d, %r12d
jmp .LBB2_1
.p2align 4, 0x90
.LBB2_6: # in Loop: Header=BB2_1 Depth=1
movq optarg(%rip), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %rbx
.LBB2_1: # =>This Inner Loop Header: Depth=1
movl $.L.str.3, %edx
movl $_ZZ10parse_argviPPcS0_PiS1_E12long_options, %ecx
movl %ebp, %edi
movq %r14, %rsi
xorl %r8d, %r8d
callq getopt_long
cmpl $105, %eax
je .LBB2_6
# %bb.2: # in Loop: Header=BB2_1 Depth=1
cmpl $-1, %eax
je .LBB2_9
# %bb.3: # in Loop: Header=BB2_1 Depth=1
cmpl $98, %eax
je .LBB2_7
# %bb.4: # in Loop: Header=BB2_1 Depth=1
cmpl $102, %eax
jne .LBB2_8
# %bb.5: # in Loop: Header=BB2_1 Depth=1
movq optarg(%rip), %r12
jmp .LBB2_1
.LBB2_7: # in Loop: Header=BB2_1 Depth=1
movq optarg(%rip), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r15
jmp .LBB2_1
.LBB2_9: # %_Z10parse_argviPPcS0_PiS1_.exit
movl $.L.str.4, %esi
movq %r12, %rdi
callq fopen
testq %rax, %rax
je .LBB2_26
# %bb.10:
movq %rax, %rbp
leaq 8(%rsp), %rdx
movl $.L.str.5, %esi
movq %rax, %rdi
xorl %eax, %eax
callq __isoc23_fscanf
movl 8(%rsp), %esi
movl $.L.str.6, %edi
movl %ebx, %edx
movl %r15d, %ecx
xorl %eax, %eax
callq printf
movslq 8(%rsp), %r14
movl %r14d, %edi
imull %edi, %edi
movl $4, %esi
callq calloc
movq %rax, %r12
movl $4, %esi
movq %r14, %rdi
callq calloc
movq %rax, %r13
movl $4, %esi
movq %r14, %rdi
callq calloc
movq %rax, %r14
movl 8(%rsp), %eax
testl %eax, %eax
movq %r14, 16(%rsp) # 8-byte Spill
jle .LBB2_17
# %bb.11: # %.preheader.preheader
movq %r15, 24(%rsp) # 8-byte Spill
xorl %r15d, %r15d
jmp .LBB2_12
.p2align 4, 0x90
.LBB2_15: # %._crit_edge
# in Loop: Header=BB2_12 Depth=1
leaq (,%r15,4), %rdx
addq %r13, %rdx
movl $.L.str.7, %esi
movq %rbp, %rdi
xorl %eax, %eax
callq __isoc23_fscanf
incq %r15
movslq 8(%rsp), %rax
cmpq %rax, %r15
jge .LBB2_16
.LBB2_12: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB2_14 Depth 2
movl 8(%rsp), %eax
testl %eax, %eax
jle .LBB2_15
# %bb.13: # %.lr.ph.preheader
# in Loop: Header=BB2_12 Depth=1
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB2_14: # %.lr.ph
# Parent Loop BB2_12 Depth=1
# => This Inner Loop Header: Depth=2
imull %r15d, %eax
cltq
addq %r14, %rax
leaq (%r12,%rax,4), %rdx
movl $.L.str.7, %esi
movq %rbp, %rdi
xorl %eax, %eax
callq __isoc23_fscanf
movl 8(%rsp), %eax
incq %r14
cmpl %eax, %r14d
jl .LBB2_14
jmp .LBB2_15
.LBB2_16:
movq 16(%rsp), %r14 # 8-byte Reload
movq 24(%rsp), %r15 # 8-byte Reload
.LBB2_17: # %._crit_edge75
movslq %eax, %rdx
imulq %rdx, %rdx
shlq $2, %rdx
movq %r12, %rsi
movl $1, %ecx
callq hipMemcpy
movslq 8(%rsp), %rdx
shlq $2, %rdx
movq %r13, %rsi
movl $1, %ecx
callq hipMemcpy
movslq 8(%rsp), %rdx
shlq $2, %rdx
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movl 8(%rsp), %r14d
movl $.Lstr, %edi
callq puts@PLT
testl %ebx, %ebx
jle .LBB2_22
# %bb.18: # %.lr.ph78
leal (%r15,%r14), %eax
decl %eax
cltd
idivl %r15d
movl %eax, %ebp
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %rbp
movl %r15d, %r15d
orq %rax, %r15
movl %ebx, %r14d
jmp .LBB2_19
.p2align 4, 0x90
.LBB2_21: # in Loop: Header=BB2_19 Depth=1
decl %r14d
je .LBB2_22
.LBB2_19: # =>This Inner Loop Header: Depth=1
movq %rbp, %rdi
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_21
# %bb.20: # in Loop: Header=BB2_19 Depth=1
movl 8(%rsp), %eax
movl %eax, 12(%rsp)
leaq 144(%rsp), %rax
movq %rax, 80(%rsp)
leaq 136(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 128(%rsp), %rax
movq %rax, 104(%rsp)
leaq 120(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
movl $_Z12jacobiKernelPfS_iS_S_, %edi
leaq 80(%rsp), %r9
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB2_21
.LBB2_22: # %._crit_edge79
movslq 8(%rsp), %rdx
shlq $2, %rdx
movq 16(%rsp), %r14 # 8-byte Reload
movq %r14, %rdi
movl $2, %ecx
callq hipMemcpy
callq hipFree
callq hipFree
callq hipFree
callq hipFree
movq %r12, %rdi
callq free
movq %r13, %rdi
callq free
movl $.L.str.9, %edi
movl %ebx, %esi
xorl %eax, %eax
callq printf
cmpl $0, 8(%rsp)
jle .LBB2_25
# %bb.23: # %.lr.ph82.preheader
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB2_24: # %.lr.ph82
# =>This Inner Loop Header: Depth=1
movss (%r14,%rbx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.10, %edi
movl %ebx, %esi
movb $1, %al
callq printf
incq %rbx
movslq 8(%rsp), %rax
cmpq %rax, %rbx
jl .LBB2_24
.LBB2_25: # %._crit_edge83
xorl %eax, %eax
addq $152, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB2_8:
.cfi_def_cfa_offset 208
callq abort
.LBB2_26:
movl $1, %edi
callq exit
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12jacobiKernelPfS_iS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z12jacobiKernelPfS_iS_S_,@object # @_Z12jacobiKernelPfS_iS_S_
.section .rodata,"a",@progbits
.globl _Z12jacobiKernelPfS_iS_S_
.p2align 3, 0x0
_Z12jacobiKernelPfS_iS_S_:
.quad _Z27__device_stub__jacobiKernelPfS_iS_S_
.size _Z12jacobiKernelPfS_iS_S_, 8
.type _ZZ10parse_argviPPcS0_PiS1_E12long_options,@object # @_ZZ10parse_argviPPcS0_PiS1_E12long_options
.data
.p2align 4, 0x0
_ZZ10parse_argviPPcS0_PiS1_E12long_options:
.quad .L.str
.long 1 # 0x1
.zero 4
.quad 0
.long 102 # 0x66
.zero 4
.quad .L.str.1
.long 2 # 0x2
.zero 4
.quad 0
.long 105 # 0x69
.zero 4
.quad .L.str.2
.long 2 # 0x2
.zero 4
.quad 0
.long 98 # 0x62
.zero 4
.zero 32
.size _ZZ10parse_argviPPcS0_PiS1_E12long_options, 128
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "file"
.size .L.str, 5
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "iterations"
.size .L.str.1, 11
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "blockSize"
.size .L.str.2, 10
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "f:i:b:"
.size .L.str.3, 7
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "r"
.size .L.str.4, 2
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "%d"
.size .L.str.5, 3
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "N = %d, iter = %d, blocksize = %d\n"
.size .L.str.6, 35
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "%f"
.size .L.str.7, 3
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "\nResult after %d iterations:\n"
.size .L.str.9, 30
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "x[%d] = %f\n"
.size .L.str.10, 12
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z12jacobiKernelPfS_iS_S_"
.size .L__unnamed_1, 26
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Running Jacobi method..."
.size .Lstr, 25
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__jacobiKernelPfS_iS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12jacobiKernelPfS_iS_S_
.addrsig_sym _ZZ10parse_argviPPcS0_PiS1_E12long_options
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /* Matrix Multiplication AX+Y in Cuda
*******************************************************************
* Description:
* Populate a float array of size N^2 with each index
* generated M times using mulitplication.
*******************************************************************
* Source:
* https://stackoverflow.com/questions/7663343/simplest-possible-example-to-show-gpu-outperform-cpu-using-cuda
*******************************************************************
*/
#include <ctime>
#include <iostream>
using namespace std;
/* matrix mult function */
__global__
void matrix_mult(int n, int r, float *matrix) {
// give index to each thread
int index = blockIdx.x * blockDim.x + threadIdx.x;
// assign i / N at current index
matrix[index] = 1.0f * index / n;
// generate a new value at current index r times
for (int j = 0; j < r; j++) {
matrix[index] = matrix[index] * matrix[index] - 0.25f;
}
}
/* program's main() */
int main(int argc, char* argv[]) {
// initialize default array size and run count, and threads per block
int N = 1024;
int R = 1000;
int T = 256;
// assign new values to N (and R) if arguments provided
if (argc > 2) {
// iterate over arguments
for (int i = 0; i < argc; i++) {
// get current argument
string arg = argv[i];
// if size specified
if (arg.compare("-n") == 0) {
N = stoi(argv[i + 1]);
}
// if run count specified
else if (arg.compare("-r") == 0) {
R = stoi(argv[i + 1]);
}
// if thread count specified
else if (arg.compare("-t") == 0) {
T = stoi(argv[i + 1]);
}
}
}
// calculate full size of matrix
unsigned int N_squared = N * N;
// print info
cout << "========================================" << endl;
cout << "|\tMatrix Multiplication" << endl;
cout << "========================================" << endl;
cout << "|\tUsing CUDA 9.2" << endl;
cout << "|\tN = " << N << "x" << N << " (=" << N_squared << ")"<< endl;
cout << "|\tRuns = " << R << endl;
cout << "|\tThreads/Block = " << T << endl;
cout << "|" << endl;
cout << "|\trunning..." << endl;
// initialize the float array (using 1D array to simulate 2D array or matrix)
float *matrix;
// allocate unified memory
cudaMallocManaged(&matrix, N_squared * sizeof(float));
// initialize clock
clock_t start = clock();
// perform matrix mult on CPU
matrix_mult<<<(N_squared + T - 1) / T, T>>>(N_squared, R, matrix);
// wait for GPU before continuing on CPU
cudaDeviceSynchronize();
// stop clock
clock_t stop = clock();
// print end status
cout << "|\t done!" << endl;
cout << "|" << endl;
cout << "|\tTime = " << (stop - start) / (double) CLOCKS_PER_SEC << " seconds" << endl;
cout << "========================================" << endl;
// free allocated memory
cudaFree(matrix);
return 0;
} | code for sm_80
Function : _Z11matrix_multiiPf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ I2F R4, c[0x0][0x160] ; /* 0x0000580000047b06 */
/* 0x000e620000201400 */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0040*/ BSSY B0, 0x130 ; /* 0x000000e000007945 */
/* 0x000fe20003800000 */
/*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e2a0000002100 */
/*0060*/ MUFU.RCP R5, R4 ; /* 0x0000000400057308 */
/* 0x002e620000001000 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fce00078e0203 */
/*0080*/ I2F R3, R0 ; /* 0x0000000000037306 */
/* 0x000e220000201400 */
/*0090*/ FFMA R2, -R4, R5, 1 ; /* 0x3f80000004027423 */
/* 0x002fc80000000105 */
/*00a0*/ FFMA R2, R5, R2, R5 ; /* 0x0000000205027223 */
/* 0x000fc60000000005 */
/*00b0*/ FCHK P0, R3, R4 ; /* 0x0000000403007302 */
/* 0x001e220000000000 */
/*00c0*/ FFMA R5, R3, R2, RZ ; /* 0x0000000203057223 */
/* 0x000fc800000000ff */
/*00d0*/ FFMA R6, -R4, R5, R3 ; /* 0x0000000504067223 */
/* 0x000fc80000000103 */
/*00e0*/ FFMA R5, R2, R6, R5 ; /* 0x0000000602057223 */
/* 0x000fe20000000005 */
/*00f0*/ @!P0 BRA 0x120 ; /* 0x0000002000008947 */
/* 0x001fea0003800000 */
/*0100*/ MOV R2, 0x120 ; /* 0x0000012000027802 */
/* 0x000fe40000000f00 */
/*0110*/ CALL.REL.NOINC 0x540 ; /* 0x0000042000007944 */
/* 0x000fea0003c00000 */
/*0120*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0130*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff047624 */
/* 0x000fe400078e00ff */
/*0140*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fc600078e00ff */
/*0150*/ ISETP.GE.AND P0, PT, R4, 0x1, PT ; /* 0x000000010400780c */
/* 0x000fe20003f06270 */
/*0160*/ IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fca00078e0203 */
/*0170*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x0001ee000c101904 */
/*0180*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0190*/ IADD3 R0, R4, -0x1, RZ ; /* 0xffffffff04007810 */
/* 0x000fc80007ffe0ff */
/*01a0*/ ISETP.GE.U32.AND P0, PT, R0, 0x3, PT ; /* 0x000000030000780c */
/* 0x000fe40003f06070 */
/*01b0*/ LOP3.LUT R0, R4, 0x3, RZ, 0xc0, !PT ; /* 0x0000000304007812 */
/* 0x000fd600078ec0ff */
/*01c0*/ @!P0 BRA 0x4c0 ; /* 0x000002f000008947 */
/* 0x000fea0003800000 */
/*01d0*/ IADD3 R4, -R0, c[0x0][0x164], RZ ; /* 0x0000590000047a10 */
/* 0x000fc80007ffe1ff */
/*01e0*/ ISETP.GT.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fda0003f04270 */
/*01f0*/ @!P0 BRA 0x450 ; /* 0x0000025000008947 */
/* 0x000fea0003800000 */
/*0200*/ ISETP.GT.AND P1, PT, R4, 0xc, PT ; /* 0x0000000c0400780c */
/* 0x000fe40003f24270 */
/*0210*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*0220*/ @!P1 BRA 0x370 ; /* 0x0000014000009947 */
/* 0x000fea0003800000 */
/*0230*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0240*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x001fe20000000005 */
/*0250*/ IADD3 R4, R4, -0x10, RZ ; /* 0xfffffff004047810 */
/* 0x000fc60007ffe0ff */
/*0260*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fe20000000005 */
/*0270*/ ISETP.GT.AND P1, PT, R4, 0xc, PT ; /* 0x0000000c0400780c */
/* 0x000fc60003f24270 */
/*0280*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*0290*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*02a0*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*02b0*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*02c0*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*02d0*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*02e0*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*02f0*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*0300*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*0310*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*0320*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*0330*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*0340*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*0350*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fe20000000005 */
/*0360*/ @P1 BRA 0x240 ; /* 0xfffffed000001947 */
/* 0x000fea000383ffff */
/*0370*/ ISETP.GT.AND P1, PT, R4, 0x4, PT ; /* 0x000000040400780c */
/* 0x000fda0003f24270 */
/*0380*/ @!P1 BRA 0x430 ; /* 0x000000a000009947 */
/* 0x000fea0003800000 */
/*0390*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x001fe20000000005 */
/*03a0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*03b0*/ IADD3 R4, R4, -0x8, RZ ; /* 0xfffffff804047810 */
/* 0x000fe20007ffe0ff */
/*03c0*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*03d0*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*03e0*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*03f0*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*0400*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*0410*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*0420*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fe40000000005 */
/*0430*/ ISETP.NE.OR P0, PT, R4, RZ, P0 ; /* 0x000000ff0400720c */
/* 0x000fda0000705670 */
/*0440*/ @!P0 BRA 0x4c0 ; /* 0x0000007000008947 */
/* 0x000fea0003800000 */
/*0450*/ IADD3 R4, R4, -0x4, RZ ; /* 0xfffffffc04047810 */
/* 0x000fe20007ffe0ff */
/*0460*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x001fc60000000005 */
/*0470*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fe20003f05270 */
/*0480*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*0490*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*04a0*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*04b0*/ @P0 BRA 0x450 ; /* 0xffffff9000000947 */
/* 0x000fea000383ffff */
/*04c0*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fda0003f05270 */
/*04d0*/ @!P0 BRA 0x520 ; /* 0x0000004000008947 */
/* 0x000fea0003800000 */
/*04e0*/ IADD3 R0, R0, -0x1, RZ ; /* 0xffffffff00007810 */
/* 0x000fe20007ffe0ff */
/*04f0*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x001fc60000000005 */
/*0500*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fda0003f05270 */
/*0510*/ @P0 BRA 0x4e0 ; /* 0xffffffc000000947 */
/* 0x000fea000383ffff */
/*0520*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0530*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0540*/ SHF.R.U32.HI R6, RZ, 0x17, R4.reuse ; /* 0x00000017ff067819 */
/* 0x100fe20000011604 */
/*0550*/ BSSY B1, 0xba0 ; /* 0x0000064000017945 */
/* 0x000fe20003800000 */
/*0560*/ SHF.R.U32.HI R5, RZ, 0x17, R3.reuse ; /* 0x00000017ff057819 */
/* 0x100fe20000011603 */
/*0570*/ IMAD.MOV.U32 R7, RZ, RZ, R3 ; /* 0x000000ffff077224 */
/* 0x000fe200078e0003 */
/*0580*/ LOP3.LUT R6, R6, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff06067812 */
/* 0x000fe200078ec0ff */
/*0590*/ IMAD.MOV.U32 R8, RZ, RZ, R4 ; /* 0x000000ffff087224 */
/* 0x000fe200078e0004 */
/*05a0*/ LOP3.LUT R5, R5, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff05057812 */
/* 0x000fe400078ec0ff */
/*05b0*/ IADD3 R11, R6, -0x1, RZ ; /* 0xffffffff060b7810 */
/* 0x000fe40007ffe0ff */
/*05c0*/ IADD3 R10, R5, -0x1, RZ ; /* 0xffffffff050a7810 */
/* 0x000fc40007ffe0ff */
/*05d0*/ ISETP.GT.U32.AND P0, PT, R11, 0xfd, PT ; /* 0x000000fd0b00780c */
/* 0x000fc80003f04070 */
/*05e0*/ ISETP.GT.U32.OR P0, PT, R10, 0xfd, P0 ; /* 0x000000fd0a00780c */
/* 0x000fda0000704470 */
/*05f0*/ @!P0 IMAD.MOV.U32 R9, RZ, RZ, RZ ; /* 0x000000ffff098224 */
/* 0x000fe200078e00ff */
/*0600*/ @!P0 BRA 0x780 ; /* 0x0000017000008947 */
/* 0x000fea0003800000 */
/*0610*/ FSETP.GTU.FTZ.AND P0, PT, |R3|, +INF , PT ; /* 0x7f8000000300780b */
/* 0x000fe40003f1c200 */
/*0620*/ FSETP.GTU.FTZ.AND P1, PT, |R4|, +INF , PT ; /* 0x7f8000000400780b */
/* 0x000fc80003f3c200 */
/*0630*/ PLOP3.LUT P0, PT, P0, P1, PT, 0xa8, 0x0 ; /* 0x000000000000781c */
/* 0x000fda0000703570 */
/*0640*/ @P0 BRA 0xb80 ; /* 0x0000053000000947 */
/* 0x000fea0003800000 */
/*0650*/ LOP3.LUT P0, RZ, R8, 0x7fffffff, R7, 0xc8, !PT ; /* 0x7fffffff08ff7812 */
/* 0x000fda000780c807 */
/*0660*/ @!P0 BRA 0xb60 ; /* 0x000004f000008947 */
/* 0x000fea0003800000 */
/*0670*/ FSETP.NEU.FTZ.AND P2, PT, |R3|.reuse, +INF , PT ; /* 0x7f8000000300780b */
/* 0x040fe40003f5d200 */
/*0680*/ FSETP.NEU.FTZ.AND P1, PT, |R4|, +INF , PT ; /* 0x7f8000000400780b */
/* 0x000fe40003f3d200 */
/*0690*/ FSETP.NEU.FTZ.AND P0, PT, |R3|, +INF , PT ; /* 0x7f8000000300780b */
/* 0x000fd60003f1d200 */
/*06a0*/ @!P1 BRA !P2, 0xb60 ; /* 0x000004b000009947 */
/* 0x000fea0005000000 */
/*06b0*/ LOP3.LUT P2, RZ, R7, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff07ff7812 */
/* 0x000fc8000784c0ff */
/*06c0*/ PLOP3.LUT P1, PT, P1, P2, PT, 0x2a, 0x0 ; /* 0x000000000000781c */
/* 0x000fda0000f24572 */
/*06d0*/ @P1 BRA 0xb40 ; /* 0x0000046000001947 */
/* 0x000fea0003800000 */
/*06e0*/ LOP3.LUT P1, RZ, R8, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff08ff7812 */
/* 0x000fc8000782c0ff */
/*06f0*/ PLOP3.LUT P0, PT, P0, P1, PT, 0x2a, 0x0 ; /* 0x000000000000781c */
/* 0x000fda0000702572 */
/*0700*/ @P0 BRA 0xb10 ; /* 0x0000040000000947 */
/* 0x000fea0003800000 */
/*0710*/ ISETP.GE.AND P0, PT, R10, RZ, PT ; /* 0x000000ff0a00720c */
/* 0x000fe40003f06270 */
/*0720*/ ISETP.GE.AND P1, PT, R11, RZ, PT ; /* 0x000000ff0b00720c */
/* 0x000fd60003f26270 */
/*0730*/ @P0 IMAD.MOV.U32 R9, RZ, RZ, RZ ; /* 0x000000ffff090224 */
/* 0x000fe400078e00ff */
/*0740*/ @!P0 IMAD.MOV.U32 R9, RZ, RZ, -0x40 ; /* 0xffffffc0ff098424 */
/* 0x000fe400078e00ff */
/*0750*/ @!P0 FFMA R7, R3, 1.84467440737095516160e+19, RZ ; /* 0x5f80000003078823 */
/* 0x000fe400000000ff */
/*0760*/ @!P1 FFMA R8, R4, 1.84467440737095516160e+19, RZ ; /* 0x5f80000004089823 */
/* 0x000fe200000000ff */
/*0770*/ @!P1 IADD3 R9, R9, 0x40, RZ ; /* 0x0000004009099810 */
/* 0x000fe40007ffe0ff */
/*0780*/ LEA R3, R6, 0xc0800000, 0x17 ; /* 0xc080000006037811 */
/* 0x000fe200078eb8ff */
/*0790*/ BSSY B2, 0xb00 ; /* 0x0000036000027945 */
/* 0x000fe20003800000 */
/*07a0*/ IADD3 R5, R5, -0x7f, RZ ; /* 0xffffff8105057810 */
/* 0x000fc60007ffe0ff */
/*07b0*/ IMAD.IADD R8, R8, 0x1, -R3 ; /* 0x0000000108087824 */
/* 0x000fe200078e0a03 */
/*07c0*/ IADD3 R6, R5.reuse, 0x7f, -R6 ; /* 0x0000007f05067810 */
/* 0x040fe20007ffe806 */
/*07d0*/ IMAD R7, R5, -0x800000, R7 ; /* 0xff80000005077824 */
/* 0x000fe400078e0207 */
/*07e0*/ MUFU.RCP R3, R8 ; /* 0x0000000800037308 */
/* 0x000e220000001000 */
/*07f0*/ FADD.FTZ R4, -R8, -RZ ; /* 0x800000ff08047221 */
/* 0x000fe40000010100 */
/*0800*/ IMAD.IADD R6, R6, 0x1, R9 ; /* 0x0000000106067824 */
/* 0x000fe400078e0209 */
/*0810*/ FFMA R10, R3, R4, 1 ; /* 0x3f800000030a7423 */
/* 0x001fc80000000004 */
/*0820*/ FFMA R12, R3, R10, R3 ; /* 0x0000000a030c7223 */
/* 0x000fc80000000003 */
/*0830*/ FFMA R3, R7, R12, RZ ; /* 0x0000000c07037223 */
/* 0x000fc800000000ff */
/*0840*/ FFMA R10, R4, R3, R7 ; /* 0x00000003040a7223 */
/* 0x000fc80000000007 */
/*0850*/ FFMA R11, R12, R10, R3 ; /* 0x0000000a0c0b7223 */
/* 0x000fc80000000003 */
/*0860*/ FFMA R7, R4, R11, R7 ; /* 0x0000000b04077223 */
/* 0x000fc80000000007 */
/*0870*/ FFMA R3, R12, R7, R11 ; /* 0x000000070c037223 */
/* 0x000fca000000000b */
/*0880*/ SHF.R.U32.HI R4, RZ, 0x17, R3 ; /* 0x00000017ff047819 */
/* 0x000fc80000011603 */
/*0890*/ LOP3.LUT R4, R4, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff04047812 */
/* 0x000fca00078ec0ff */
/*08a0*/ IMAD.IADD R8, R4, 0x1, R6 ; /* 0x0000000104087824 */
/* 0x000fca00078e0206 */
/*08b0*/ IADD3 R4, R8, -0x1, RZ ; /* 0xffffffff08047810 */
/* 0x000fc80007ffe0ff */
/*08c0*/ ISETP.GE.U32.AND P0, PT, R4, 0xfe, PT ; /* 0x000000fe0400780c */
/* 0x000fda0003f06070 */
/*08d0*/ @!P0 BRA 0xae0 ; /* 0x0000020000008947 */
/* 0x000fea0003800000 */
/*08e0*/ ISETP.GT.AND P0, PT, R8, 0xfe, PT ; /* 0x000000fe0800780c */
/* 0x000fda0003f04270 */
/*08f0*/ @P0 BRA 0xab0 ; /* 0x000001b000000947 */
/* 0x000fea0003800000 */
/*0900*/ ISETP.GE.AND P0, PT, R8, 0x1, PT ; /* 0x000000010800780c */
/* 0x000fda0003f06270 */
/*0910*/ @P0 BRA 0xaf0 ; /* 0x000001d000000947 */
/* 0x000fea0003800000 */
/*0920*/ ISETP.GE.AND P0, PT, R8, -0x18, PT ; /* 0xffffffe80800780c */
/* 0x000fe40003f06270 */
/*0930*/ LOP3.LUT R3, R3, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000003037812 */
/* 0x000fd600078ec0ff */
/*0940*/ @!P0 BRA 0xaf0 ; /* 0x000001a000008947 */
/* 0x000fea0003800000 */
/*0950*/ FFMA.RZ R4, R12, R7.reuse, R11.reuse ; /* 0x000000070c047223 */
/* 0x180fe2000000c00b */
/*0960*/ ISETP.NE.AND P2, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe20003f45270 */
/*0970*/ FFMA.RM R5, R12, R7.reuse, R11.reuse ; /* 0x000000070c057223 */
/* 0x180fe2000000400b */
/*0980*/ ISETP.NE.AND P1, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe40003f25270 */
/*0990*/ LOP3.LUT R6, R4, 0x7fffff, RZ, 0xc0, !PT ; /* 0x007fffff04067812 */
/* 0x000fe200078ec0ff */
/*09a0*/ FFMA.RP R4, R12, R7, R11 ; /* 0x000000070c047223 */
/* 0x000fe2000000800b */
/*09b0*/ IADD3 R7, R8, 0x20, RZ ; /* 0x0000002008077810 */
/* 0x000fe20007ffe0ff */
/*09c0*/ IMAD.MOV R8, RZ, RZ, -R8 ; /* 0x000000ffff087224 */
/* 0x000fe200078e0a08 */
/*09d0*/ LOP3.LUT R6, R6, 0x800000, RZ, 0xfc, !PT ; /* 0x0080000006067812 */
/* 0x000fe400078efcff */
/*09e0*/ FSETP.NEU.FTZ.AND P0, PT, R4, R5, PT ; /* 0x000000050400720b */
/* 0x000fc40003f1d000 */
/*09f0*/ SHF.L.U32 R7, R6, R7, RZ ; /* 0x0000000706077219 */
/* 0x000fe400000006ff */
/*0a00*/ SEL R5, R8, RZ, P2 ; /* 0x000000ff08057207 */
/* 0x000fe40001000000 */
/*0a10*/ ISETP.NE.AND P1, PT, R7, RZ, P1 ; /* 0x000000ff0700720c */
/* 0x000fe40000f25270 */
/*0a20*/ SHF.R.U32.HI R5, RZ, R5, R6 ; /* 0x00000005ff057219 */
/* 0x000fe40000011606 */
/*0a30*/ PLOP3.LUT P0, PT, P0, P1, PT, 0xa8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40000703570 */
/*0a40*/ SHF.R.U32.HI R7, RZ, 0x1, R5 ; /* 0x00000001ff077819 */
/* 0x000fc40000011605 */
/*0a50*/ SEL R4, RZ, 0x1, !P0 ; /* 0x00000001ff047807 */
/* 0x000fc80004000000 */
/*0a60*/ LOP3.LUT R4, R4, 0x1, R7, 0xf8, !PT ; /* 0x0000000104047812 */
/* 0x000fc800078ef807 */
/*0a70*/ LOP3.LUT R4, R4, R5, RZ, 0xc0, !PT ; /* 0x0000000504047212 */
/* 0x000fca00078ec0ff */
/*0a80*/ IMAD.IADD R4, R7, 0x1, R4 ; /* 0x0000000107047824 */
/* 0x000fca00078e0204 */
/*0a90*/ LOP3.LUT R3, R4, R3, RZ, 0xfc, !PT ; /* 0x0000000304037212 */
/* 0x000fe200078efcff */
/*0aa0*/ BRA 0xaf0 ; /* 0x0000004000007947 */
/* 0x000fea0003800000 */
/*0ab0*/ LOP3.LUT R3, R3, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000003037812 */
/* 0x000fc800078ec0ff */
/*0ac0*/ LOP3.LUT R3, R3, 0x7f800000, RZ, 0xfc, !PT ; /* 0x7f80000003037812 */
/* 0x000fe200078efcff */
/*0ad0*/ BRA 0xaf0 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*0ae0*/ IMAD R3, R6, 0x800000, R3 ; /* 0x0080000006037824 */
/* 0x000fe400078e0203 */
/*0af0*/ BSYNC B2 ; /* 0x0000000000027941 */
/* 0x000fea0003800000 */
/*0b00*/ BRA 0xb90 ; /* 0x0000008000007947 */
/* 0x000fea0003800000 */
/*0b10*/ LOP3.LUT R3, R8, 0x80000000, R7, 0x48, !PT ; /* 0x8000000008037812 */
/* 0x000fc800078e4807 */
/*0b20*/ LOP3.LUT R3, R3, 0x7f800000, RZ, 0xfc, !PT ; /* 0x7f80000003037812 */
/* 0x000fe200078efcff */
/*0b30*/ BRA 0xb90 ; /* 0x0000005000007947 */
/* 0x000fea0003800000 */
/*0b40*/ LOP3.LUT R3, R8, 0x80000000, R7, 0x48, !PT ; /* 0x8000000008037812 */
/* 0x000fe200078e4807 */
/*0b50*/ BRA 0xb90 ; /* 0x0000003000007947 */
/* 0x000fea0003800000 */
/*0b60*/ MUFU.RSQ R3, -QNAN ; /* 0xffc0000000037908 */
/* 0x000e220000001400 */
/*0b70*/ BRA 0xb90 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*0b80*/ FADD.FTZ R3, R3, R4 ; /* 0x0000000403037221 */
/* 0x000fe40000010000 */
/*0b90*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0ba0*/ IMAD.MOV.U32 R5, RZ, RZ, R3 ; /* 0x000000ffff057224 */
/* 0x001fe400078e0003 */
/*0bb0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x0 ; /* 0x00000000ff037424 */
/* 0x000fc800078e00ff */
/*0bc0*/ RET.REL.NODEC R2 0x0 ; /* 0xfffff43002007950 */
/* 0x000fea0003c3ffff */
/*0bd0*/ BRA 0xbd0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0be0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0bf0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c00*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c10*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /* Matrix Multiplication AX+Y in Cuda
*******************************************************************
* Description:
* Populate a float array of size N^2 with each index
* generated M times using mulitplication.
*******************************************************************
* Source:
* https://stackoverflow.com/questions/7663343/simplest-possible-example-to-show-gpu-outperform-cpu-using-cuda
*******************************************************************
*/
#include <ctime>
#include <iostream>
using namespace std;
/* matrix mult function */
__global__
void matrix_mult(int n, int r, float *matrix) {
// give index to each thread
int index = blockIdx.x * blockDim.x + threadIdx.x;
// assign i / N at current index
matrix[index] = 1.0f * index / n;
// generate a new value at current index r times
for (int j = 0; j < r; j++) {
matrix[index] = matrix[index] * matrix[index] - 0.25f;
}
}
/* program's main() */
int main(int argc, char* argv[]) {
// initialize default array size and run count, and threads per block
int N = 1024;
int R = 1000;
int T = 256;
// assign new values to N (and R) if arguments provided
if (argc > 2) {
// iterate over arguments
for (int i = 0; i < argc; i++) {
// get current argument
string arg = argv[i];
// if size specified
if (arg.compare("-n") == 0) {
N = stoi(argv[i + 1]);
}
// if run count specified
else if (arg.compare("-r") == 0) {
R = stoi(argv[i + 1]);
}
// if thread count specified
else if (arg.compare("-t") == 0) {
T = stoi(argv[i + 1]);
}
}
}
// calculate full size of matrix
unsigned int N_squared = N * N;
// print info
cout << "========================================" << endl;
cout << "|\tMatrix Multiplication" << endl;
cout << "========================================" << endl;
cout << "|\tUsing CUDA 9.2" << endl;
cout << "|\tN = " << N << "x" << N << " (=" << N_squared << ")"<< endl;
cout << "|\tRuns = " << R << endl;
cout << "|\tThreads/Block = " << T << endl;
cout << "|" << endl;
cout << "|\trunning..." << endl;
// initialize the float array (using 1D array to simulate 2D array or matrix)
float *matrix;
// allocate unified memory
cudaMallocManaged(&matrix, N_squared * sizeof(float));
// initialize clock
clock_t start = clock();
// perform matrix mult on CPU
matrix_mult<<<(N_squared + T - 1) / T, T>>>(N_squared, R, matrix);
// wait for GPU before continuing on CPU
cudaDeviceSynchronize();
// stop clock
clock_t stop = clock();
// print end status
cout << "|\t done!" << endl;
cout << "|" << endl;
cout << "|\tTime = " << (stop - start) / (double) CLOCKS_PER_SEC << " seconds" << endl;
cout << "========================================" << endl;
// free allocated memory
cudaFree(matrix);
return 0;
} | .file "tmpxft_0003810a_00000000-6_matrix_mult_cuda.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z33__device_stub__Z11matrix_multiiPfiiPf
.type _Z33__device_stub__Z11matrix_multiiPfiiPf, @function
_Z33__device_stub__Z11matrix_multiiPfiiPf:
.LFB3694:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movq %rdx, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z11matrix_multiiPf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z33__device_stub__Z11matrix_multiiPfiiPf, .-_Z33__device_stub__Z11matrix_multiiPfiiPf
.globl _Z11matrix_multiiPf
.type _Z11matrix_multiiPf, @function
_Z11matrix_multiiPf:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z11matrix_multiiPfiiPf
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z11matrix_multiiPf, .-_Z11matrix_multiiPf
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z11matrix_multiiPf"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z11matrix_multiiPf(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .text._ZN9__gnu_cxx6__stoaIlicJiEEET0_PFT_PKT1_PPS3_DpT2_EPKcS5_PmS9_,"axG",@progbits,_ZN9__gnu_cxx6__stoaIlicJiEEET0_PFT_PKT1_PPS3_DpT2_EPKcS5_PmS9_,comdat
.weak _ZN9__gnu_cxx6__stoaIlicJiEEET0_PFT_PKT1_PPS3_DpT2_EPKcS5_PmS9_
.type _ZN9__gnu_cxx6__stoaIlicJiEEET0_PFT_PKT1_PPS3_DpT2_EPKcS5_PmS9_, @function
_ZN9__gnu_cxx6__stoaIlicJiEEET0_PFT_PKT1_PPS3_DpT2_EPKcS5_PmS9_:
.LFB3771:
.cfi_startproc
.cfi_personality 0x9b,DW.ref.__gxx_personality_v0
.cfi_lsda 0x1b,.LLSDA3771
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rdi, %r13
movq %rsi, 8(%rsp)
movq %rdx, %rbp
movq %rcx, %r12
movl %r8d, %r14d
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
call __errno_location@PLT
movq %rax, %rbx
movl (%rax), %r15d
movl $0, (%rax)
leaq 16(%rsp), %rsi
movl %r14d, %edx
movq %rbp, %rdi
.LEHB0:
call *%r13
movq 16(%rsp), %rcx
cmpq %rbp, %rcx
je .L27
cmpl $34, (%rbx)
je .L16
movl $2147483648, %edx
addq %rax, %rdx
shrq $32, %rdx
jne .L16
testq %r12, %r12
je .L19
subq %rbp, %rcx
movq %rcx, (%r12)
.L19:
cmpl $0, (%rbx)
jne .L13
movl %r15d, (%rbx)
.L13:
movq 24(%rsp), %rdx
subq %fs:40, %rdx
jne .L28
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L27:
.cfi_restore_state
movq 24(%rsp), %rax
subq %fs:40, %rax
jne .L29
movq 8(%rsp), %rdi
call _ZSt24__throw_invalid_argumentPKc@PLT
.L29:
call __stack_chk_fail@PLT
.L16:
movq 24(%rsp), %rax
subq %fs:40, %rax
jne .L30
movq 8(%rsp), %rdi
call _ZSt20__throw_out_of_rangePKc@PLT
.LEHE0:
.L25:
endbr64
movq %rax, %rdi
cmpl $0, (%rbx)
jne .L22
movl %r15d, (%rbx)
.L22:
movq 24(%rsp), %rax
subq %fs:40, %rax
je .L23
call __stack_chk_fail@PLT
.L30:
call __stack_chk_fail@PLT
.L23:
.LEHB1:
call _Unwind_Resume@PLT
.LEHE1:
.L28:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3771:
.globl __gxx_personality_v0
.section .gcc_except_table._ZN9__gnu_cxx6__stoaIlicJiEEET0_PFT_PKT1_PPS3_DpT2_EPKcS5_PmS9_,"aG",@progbits,_ZN9__gnu_cxx6__stoaIlicJiEEET0_PFT_PKT1_PPS3_DpT2_EPKcS5_PmS9_,comdat
.LLSDA3771:
.byte 0xff
.byte 0xff
.byte 0x1
.uleb128 .LLSDACSE3771-.LLSDACSB3771
.LLSDACSB3771:
.uleb128 .LEHB0-.LFB3771
.uleb128 .LEHE0-.LEHB0
.uleb128 .L25-.LFB3771
.uleb128 0
.uleb128 .LEHB1-.LFB3771
.uleb128 .LEHE1-.LEHB1
.uleb128 0
.uleb128 0
.LLSDACSE3771:
.section .text._ZN9__gnu_cxx6__stoaIlicJiEEET0_PFT_PKT1_PPS3_DpT2_EPKcS5_PmS9_,"axG",@progbits,_ZN9__gnu_cxx6__stoaIlicJiEEET0_PFT_PKT1_PPS3_DpT2_EPKcS5_PmS9_,comdat
.size _ZN9__gnu_cxx6__stoaIlicJiEEET0_PFT_PKT1_PPS3_DpT2_EPKcS5_PmS9_, .-_ZN9__gnu_cxx6__stoaIlicJiEEET0_PFT_PKT1_PPS3_DpT2_EPKcS5_PmS9_
.section .text._ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPKcEEvT_S8_St20forward_iterator_tag,"axG",@progbits,_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPKcEEvT_S8_St20forward_iterator_tag,comdat
.align 2
.weak _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPKcEEvT_S8_St20forward_iterator_tag
.type _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPKcEEvT_S8_St20forward_iterator_tag, @function
_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPKcEEvT_S8_St20forward_iterator_tag:
.LFB4085:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $16, %rsp
.cfi_def_cfa_offset 48
movq %rdi, %rbx
movq %rsi, %r12
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
subq %rsi, %rdx
movq %rdx, %rbp
movq %rdx, (%rsp)
cmpq $15, %rdx
ja .L38
movq (%rdi), %rdi
cmpq $1, %rdx
jne .L34
movzbl (%rsi), %eax
movb %al, (%rdi)
.L35:
movq (%rsp), %rax
movq %rax, 8(%rbx)
movq (%rbx), %rdx
movb $0, (%rdx,%rax)
movq 8(%rsp), %rax
subq %fs:40, %rax
jne .L39
addq $16, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L38:
.cfi_restore_state
movq %rsp, %rsi
movl $0, %edx
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE9_M_createERmm@PLT
movq %rax, %rdi
movq %rax, (%rbx)
movq (%rsp), %rax
movq %rax, 16(%rbx)
.L33:
movq %rbp, %rdx
movq %r12, %rsi
call memcpy@PLT
jmp .L35
.L34:
testq %rdx, %rdx
je .L35
jmp .L33
.L39:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4085:
.size _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPKcEEvT_S8_St20forward_iterator_tag, .-_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPKcEEvT_S8_St20forward_iterator_tag
.section .rodata._ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2IS3_EEPKcRKS3_.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "basic_string: construction from null is not valid"
.section .text._ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2IS3_EEPKcRKS3_,"axG",@progbits,_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC5IS3_EEPKcRKS3_,comdat
.align 2
.weak _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2IS3_EEPKcRKS3_
.type _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2IS3_EEPKcRKS3_, @function
_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2IS3_EEPKcRKS3_:
.LFB3998:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
leaq 16(%rdi), %rax
movq %rax, (%rdi)
testq %rsi, %rsi
je .L43
movq %rdi, %rbp
movq %rsi, %rbx
movq %rsi, %rdi
call strlen@PLT
leaq (%rbx,%rax), %rdx
movq %rbx, %rsi
movq %rbp, %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPKcEEvT_S8_St20forward_iterator_tag
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L43:
.cfi_restore_state
leaq .LC1(%rip), %rdi
call _ZSt19__throw_logic_errorPKc@PLT
.cfi_endproc
.LFE3998:
.size _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2IS3_EEPKcRKS3_, .-_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2IS3_EEPKcRKS3_
.weak _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1IS3_EEPKcRKS3_
.set _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1IS3_EEPKcRKS3_,_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2IS3_EEPKcRKS3_
.section .rodata.str1.1
.LC2:
.string "-n"
.LC3:
.string "stoi"
.LC4:
.string "-r"
.LC5:
.string "-t"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC6:
.string "========================================"
.section .rodata.str1.1
.LC7:
.string "|\tMatrix Multiplication"
.LC8:
.string "|\tUsing CUDA 9.2"
.LC9:
.string "|\tN = "
.LC10:
.string "x"
.LC11:
.string " (="
.LC12:
.string ")"
.LC13:
.string "|\tRuns = "
.LC14:
.string "|\tThreads/Block = "
.LC15:
.string "|"
.LC16:
.string "|\trunning..."
.LC17:
.string "|\t done!"
.LC18:
.string "|\tTime = "
.LC20:
.string " seconds"
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
.cfi_personality 0x9b,DW.ref.__gxx_personality_v0
.cfi_lsda 0x1b,.LLSDA3669
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $136, %rsp
.cfi_def_cfa_offset 192
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
cmpl $2, %edi
jle .L73
movq %rsi, %r12
movslq %edi, %rdi
leaq (%rsi,%rdi,8), %r15
movl $256, 12(%rsp)
movl $1000, 8(%rsp)
movl $1024, 4(%rsp)
jmp .L61
.L83:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L81
leaq .LC1(%rip), %rdi
.LEHB2:
call _ZSt19__throw_logic_errorPKc@PLT
.LEHE2:
.L81:
call __stack_chk_fail@PLT
.L86:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L82
leaq .LC1(%rip), %rdi
.LEHB3:
call _ZSt19__throw_logic_errorPKc@PLT
.LEHE3:
.L75:
endbr64
movq %rax, %rbx
jmp .L64
.L82:
call __stack_chk_fail@PLT
.L87:
movl $10, %r8d
movl $0, %ecx
movq 80(%rsp), %rdx
leaq .LC3(%rip), %rsi
movq __isoc23_strtol@GOTPCREL(%rip), %rdi
.LEHB4:
call _ZN9__gnu_cxx6__stoaIlicJiEEET0_PFT_PKT1_PPS3_DpT2_EPKcS5_PmS9_
.LEHE4:
movl %eax, 4(%rsp)
movq 80(%rsp), %rdi
leaq 96(%rsp), %rax
cmpq %rax, %rdi
je .L54
movq 96(%rsp), %rax
leaq 1(%rax), %rsi
call _ZdlPvm@PLT
.L54:
movq 48(%rsp), %rdi
leaq 64(%rsp), %rax
cmpq %rax, %rdi
je .L60
.L68:
movq 64(%rsp), %rax
leaq 1(%rax), %rsi
call _ZdlPvm@PLT
.L60:
addq $8, %r12
cmpq %r12, %r15
je .L45
.L61:
movq %r12, %r14
movq (%r12), %rbx
leaq 64(%rsp), %rax
movq %rax, 48(%rsp)
testq %rbx, %rbx
je .L83
movq %rbx, %rdi
call strlen@PLT
leaq (%rbx,%rax), %rdx
leaq 48(%rsp), %rdi
movq %rbx, %rsi
.LEHB5:
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPKcEEvT_S8_St20forward_iterator_tag
.LEHE5:
movq 56(%rsp), %rbx
movl $2, %ebp
cmpq %rbp, %rbx
cmovbe %rbx, %rbp
movq 48(%rsp), %r13
testq %rbx, %rbx
je .L48
movq %rbp, %rdx
leaq .LC2(%rip), %rsi
movq %r13, %rdi
call memcmp@PLT
testl %eax, %eax
jne .L49
leaq -2(%rbx), %rax
movl $2147483648, %ecx
cmpq %rcx, %rax
jge .L49
cmpq $-2147483648, %rax
jge .L71
.L49:
movq %rbp, %rdx
leaq .LC4(%rip), %rsi
movq %r13, %rdi
call memcmp@PLT
testl %eax, %eax
jne .L56
leaq -2(%rbx), %rax
movl $2147483648, %ecx
cmpq %rcx, %rax
jge .L56
cmpq $-2147483648, %rax
jge .L70
.L56:
movq %rbp, %rdx
leaq .LC5(%rip), %rsi
movq %r13, %rdi
call memcmp@PLT
testl %eax, %eax
jne .L54
subq $2, %rbx
movl $2147483648, %eax
cmpq %rax, %rbx
jge .L59
cmpq $-2147483648, %rbx
jge .L69
.L59:
movq 48(%rsp), %rdi
jmp .L68
.L50:
testq %rbx, %rbx
jne .L49
leaq -2(%rbx), %rax
.L70:
testl %eax, %eax
jne .L57
leaq 36(%rsp), %rdx
movq 8(%r14), %rsi
leaq 80(%rsp), %rdi
.LEHB6:
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1IS3_EEPKcRKS3_
.LEHE6:
movl $10, %r8d
movl $0, %ecx
movq 80(%rsp), %rdx
leaq .LC3(%rip), %rsi
movq __isoc23_strtol@GOTPCREL(%rip), %rdi
.LEHB7:
call _ZN9__gnu_cxx6__stoaIlicJiEEET0_PFT_PKT1_PPS3_DpT2_EPKcS5_PmS9_
.LEHE7:
movl %eax, 8(%rsp)
leaq 80(%rsp), %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
jmp .L54
.L57:
testq %rbx, %rbx
jne .L56
subq $2, %rbx
.L69:
testl %ebx, %ebx
jne .L54
leaq 36(%rsp), %rdx
movq 8(%r14), %rsi
leaq 80(%rsp), %rdi
.LEHB8:
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1IS3_EEPKcRKS3_
.LEHE8:
movl $10, %r8d
movl $0, %ecx
movq 80(%rsp), %rdx
leaq .LC3(%rip), %rsi
movq __isoc23_strtol@GOTPCREL(%rip), %rdi
.LEHB9:
call _ZN9__gnu_cxx6__stoaIlicJiEEET0_PFT_PKT1_PPS3_DpT2_EPKcS5_PmS9_
.LEHE9:
movl %eax, 12(%rsp)
leaq 80(%rsp), %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
jmp .L54
.L73:
movl $256, 12(%rsp)
movl $1000, 8(%rsp)
movl $1024, 4(%rsp)
.L45:
movl 4(%rsp), %r15d
movl %r15d, %ebp
imull %r15d, %ebp
leaq .LC6(%rip), %r12
movq %r12, %rsi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
.LEHB10:
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC7(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq %r12, %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC8(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC9(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %r15d, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC10(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %r15d, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC11(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %ebp, %r12d
movq %r12, %rsi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
leaq .LC12(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC13(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl 8(%rsp), %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC14(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl 12(%rsp), %r14d
movl %r14d, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC15(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC16(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq 0(,%r12,4), %rsi
leaq 16(%rsp), %rdi
movl $1, %edx
call cudaMallocManaged@PLT
call clock@PLT
movq %rax, %r12
movl %r14d, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
leal -1(%r14,%rbp), %eax
movl $0, %edx
divl %r14d
movl %eax, 24(%rsp)
movl $1, 28(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L84
.L62:
call cudaDeviceSynchronize@PLT
call clock@PLT
movq %rax, %rbx
leaq .LC17(%rip), %rsi
leaq _ZSt4cout(%rip), %rbp
movq %rbp, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC15(%rip), %rsi
movq %rbp, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC18(%rip), %rsi
movq %rbp, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
subq %r12, %rbx
pxor %xmm0, %xmm0
cvtsi2sdq %rbx, %xmm0
divsd .LC19(%rip), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
leaq .LC20(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC6(%rip), %rsi
movq %rbp, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L85
movl $0, %eax
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L84:
.cfi_restore_state
movq 16(%rsp), %rdx
movl 8(%rsp), %esi
movl %ebp, %edi
call _Z33__device_stub__Z11matrix_multiiPfiiPf
jmp .L62
.L74:
endbr64
movq %rax, %rbx
leaq 80(%rsp), %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
.L64:
leaq 48(%rsp), %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
movq 120(%rsp), %rax
subq %fs:40, %rax
je .L67
call __stack_chk_fail@PLT
.L76:
endbr64
movq %rax, %rbx
leaq 80(%rsp), %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
jmp .L64
.L77:
endbr64
movq %rax, %rbx
leaq 80(%rsp), %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
jmp .L64
.L67:
movq %rbx, %rdi
call _Unwind_Resume@PLT
.LEHE10:
.L48:
leaq -2(%rbx), %rax
.L71:
testl %eax, %eax
jne .L50
movq 8(%r14), %rbx
leaq 96(%rsp), %rax
movq %rax, 80(%rsp)
testq %rbx, %rbx
je .L86
movq %rbx, %rdi
call strlen@PLT
leaq (%rbx,%rax), %rdx
leaq 80(%rsp), %rdi
movq %rbx, %rsi
.LEHB11:
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPKcEEvT_S8_St20forward_iterator_tag
.LEHE11:
jmp .L87
.L85:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.section .gcc_except_table,"a",@progbits
.LLSDA3669:
.byte 0xff
.byte 0xff
.byte 0x1
.uleb128 .LLSDACSE3669-.LLSDACSB3669
.LLSDACSB3669:
.uleb128 .LEHB2-.LFB3669
.uleb128 .LEHE2-.LEHB2
.uleb128 0
.uleb128 0
.uleb128 .LEHB3-.LFB3669
.uleb128 .LEHE3-.LEHB3
.uleb128 .L75-.LFB3669
.uleb128 0
.uleb128 .LEHB4-.LFB3669
.uleb128 .LEHE4-.LEHB4
.uleb128 .L74-.LFB3669
.uleb128 0
.uleb128 .LEHB5-.LFB3669
.uleb128 .LEHE5-.LEHB5
.uleb128 0
.uleb128 0
.uleb128 .LEHB6-.LFB3669
.uleb128 .LEHE6-.LEHB6
.uleb128 .L75-.LFB3669
.uleb128 0
.uleb128 .LEHB7-.LFB3669
.uleb128 .LEHE7-.LEHB7
.uleb128 .L76-.LFB3669
.uleb128 0
.uleb128 .LEHB8-.LFB3669
.uleb128 .LEHE8-.LEHB8
.uleb128 .L75-.LFB3669
.uleb128 0
.uleb128 .LEHB9-.LFB3669
.uleb128 .LEHE9-.LEHB9
.uleb128 .L77-.LFB3669
.uleb128 0
.uleb128 .LEHB10-.LFB3669
.uleb128 .LEHE10-.LEHB10
.uleb128 0
.uleb128 0
.uleb128 .LEHB11-.LFB3669
.uleb128 .LEHE11-.LEHB11
.uleb128 .L75-.LFB3669
.uleb128 0
.LLSDACSE3669:
.text
.size main, .-main
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC19:
.long 0
.long 1093567616
.hidden DW.ref.__gxx_personality_v0
.weak DW.ref.__gxx_personality_v0
.section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat
.align 8
.type DW.ref.__gxx_personality_v0, @object
.size DW.ref.__gxx_personality_v0, 8
DW.ref.__gxx_personality_v0:
.quad __gxx_personality_v0
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /* Matrix Multiplication AX+Y in Cuda
*******************************************************************
* Description:
* Populate a float array of size N^2 with each index
* generated M times using mulitplication.
*******************************************************************
* Source:
* https://stackoverflow.com/questions/7663343/simplest-possible-example-to-show-gpu-outperform-cpu-using-cuda
*******************************************************************
*/
#include <ctime>
#include <iostream>
using namespace std;
/* matrix mult function */
__global__
void matrix_mult(int n, int r, float *matrix) {
// give index to each thread
int index = blockIdx.x * blockDim.x + threadIdx.x;
// assign i / N at current index
matrix[index] = 1.0f * index / n;
// generate a new value at current index r times
for (int j = 0; j < r; j++) {
matrix[index] = matrix[index] * matrix[index] - 0.25f;
}
}
/* program's main() */
int main(int argc, char* argv[]) {
// initialize default array size and run count, and threads per block
int N = 1024;
int R = 1000;
int T = 256;
// assign new values to N (and R) if arguments provided
if (argc > 2) {
// iterate over arguments
for (int i = 0; i < argc; i++) {
// get current argument
string arg = argv[i];
// if size specified
if (arg.compare("-n") == 0) {
N = stoi(argv[i + 1]);
}
// if run count specified
else if (arg.compare("-r") == 0) {
R = stoi(argv[i + 1]);
}
// if thread count specified
else if (arg.compare("-t") == 0) {
T = stoi(argv[i + 1]);
}
}
}
// calculate full size of matrix
unsigned int N_squared = N * N;
// print info
cout << "========================================" << endl;
cout << "|\tMatrix Multiplication" << endl;
cout << "========================================" << endl;
cout << "|\tUsing CUDA 9.2" << endl;
cout << "|\tN = " << N << "x" << N << " (=" << N_squared << ")"<< endl;
cout << "|\tRuns = " << R << endl;
cout << "|\tThreads/Block = " << T << endl;
cout << "|" << endl;
cout << "|\trunning..." << endl;
// initialize the float array (using 1D array to simulate 2D array or matrix)
float *matrix;
// allocate unified memory
cudaMallocManaged(&matrix, N_squared * sizeof(float));
// initialize clock
clock_t start = clock();
// perform matrix mult on CPU
matrix_mult<<<(N_squared + T - 1) / T, T>>>(N_squared, R, matrix);
// wait for GPU before continuing on CPU
cudaDeviceSynchronize();
// stop clock
clock_t stop = clock();
// print end status
cout << "|\t done!" << endl;
cout << "|" << endl;
cout << "|\tTime = " << (stop - start) / (double) CLOCKS_PER_SEC << " seconds" << endl;
cout << "========================================" << endl;
// free allocated memory
cudaFree(matrix);
return 0;
} | /* Matrix Multiplication AX+Y in Cuda
*******************************************************************
* Description:
* Populate a float array of size N^2 with each index
* generated M times using mulitplication.
*******************************************************************
* Source:
* https://stackoverflow.com/questions/7663343/simplest-possible-example-to-show-gpu-outperform-cpu-using-cuda
*******************************************************************
*/
#include <hip/hip_runtime.h>
#include <ctime>
#include <iostream>
using namespace std;
/* matrix mult function */
__global__
void matrix_mult(int n, int r, float *matrix) {
// give index to each thread
int index = blockIdx.x * blockDim.x + threadIdx.x;
// assign i / N at current index
matrix[index] = 1.0f * index / n;
// generate a new value at current index r times
for (int j = 0; j < r; j++) {
matrix[index] = matrix[index] * matrix[index] - 0.25f;
}
}
/* program's main() */
int main(int argc, char* argv[]) {
// initialize default array size and run count, and threads per block
int N = 1024;
int R = 1000;
int T = 256;
// assign new values to N (and R) if arguments provided
if (argc > 2) {
// iterate over arguments
for (int i = 0; i < argc; i++) {
// get current argument
string arg = argv[i];
// if size specified
if (arg.compare("-n") == 0) {
N = stoi(argv[i + 1]);
}
// if run count specified
else if (arg.compare("-r") == 0) {
R = stoi(argv[i + 1]);
}
// if thread count specified
else if (arg.compare("-t") == 0) {
T = stoi(argv[i + 1]);
}
}
}
// calculate full size of matrix
unsigned int N_squared = N * N;
// print info
cout << "========================================" << endl;
cout << "|\tMatrix Multiplication" << endl;
cout << "========================================" << endl;
cout << "|\tUsing CUDA 9.2" << endl;
cout << "|\tN = " << N << "x" << N << " (=" << N_squared << ")"<< endl;
cout << "|\tRuns = " << R << endl;
cout << "|\tThreads/Block = " << T << endl;
cout << "|" << endl;
cout << "|\trunning..." << endl;
// initialize the float array (using 1D array to simulate 2D array or matrix)
float *matrix;
// allocate unified memory
hipMallocManaged(&matrix, N_squared * sizeof(float));
// initialize clock
clock_t start = clock();
// perform matrix mult on CPU
matrix_mult<<<(N_squared + T - 1) / T, T>>>(N_squared, R, matrix);
// wait for GPU before continuing on CPU
hipDeviceSynchronize();
// stop clock
clock_t stop = clock();
// print end status
cout << "|\t done!" << endl;
cout << "|" << endl;
cout << "|\tTime = " << (stop - start) / (double) CLOCKS_PER_SEC << " seconds" << endl;
cout << "========================================" << endl;
// free allocated memory
hipFree(matrix);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /* Matrix Multiplication AX+Y in Cuda
*******************************************************************
* Description:
* Populate a float array of size N^2 with each index
* generated M times using mulitplication.
*******************************************************************
* Source:
* https://stackoverflow.com/questions/7663343/simplest-possible-example-to-show-gpu-outperform-cpu-using-cuda
*******************************************************************
*/
#include <hip/hip_runtime.h>
#include <ctime>
#include <iostream>
using namespace std;
/* matrix mult function */
__global__
void matrix_mult(int n, int r, float *matrix) {
// give index to each thread
int index = blockIdx.x * blockDim.x + threadIdx.x;
// assign i / N at current index
matrix[index] = 1.0f * index / n;
// generate a new value at current index r times
for (int j = 0; j < r; j++) {
matrix[index] = matrix[index] * matrix[index] - 0.25f;
}
}
/* program's main() */
int main(int argc, char* argv[]) {
// initialize default array size and run count, and threads per block
int N = 1024;
int R = 1000;
int T = 256;
// assign new values to N (and R) if arguments provided
if (argc > 2) {
// iterate over arguments
for (int i = 0; i < argc; i++) {
// get current argument
string arg = argv[i];
// if size specified
if (arg.compare("-n") == 0) {
N = stoi(argv[i + 1]);
}
// if run count specified
else if (arg.compare("-r") == 0) {
R = stoi(argv[i + 1]);
}
// if thread count specified
else if (arg.compare("-t") == 0) {
T = stoi(argv[i + 1]);
}
}
}
// calculate full size of matrix
unsigned int N_squared = N * N;
// print info
cout << "========================================" << endl;
cout << "|\tMatrix Multiplication" << endl;
cout << "========================================" << endl;
cout << "|\tUsing CUDA 9.2" << endl;
cout << "|\tN = " << N << "x" << N << " (=" << N_squared << ")"<< endl;
cout << "|\tRuns = " << R << endl;
cout << "|\tThreads/Block = " << T << endl;
cout << "|" << endl;
cout << "|\trunning..." << endl;
// initialize the float array (using 1D array to simulate 2D array or matrix)
float *matrix;
// allocate unified memory
hipMallocManaged(&matrix, N_squared * sizeof(float));
// initialize clock
clock_t start = clock();
// perform matrix mult on CPU
matrix_mult<<<(N_squared + T - 1) / T, T>>>(N_squared, R, matrix);
// wait for GPU before continuing on CPU
hipDeviceSynchronize();
// stop clock
clock_t stop = clock();
// print end status
cout << "|\t done!" << endl;
cout << "|" << endl;
cout << "|\tTime = " << (stop - start) / (double) CLOCKS_PER_SEC << " seconds" << endl;
cout << "========================================" << endl;
// free allocated memory
hipFree(matrix);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z11matrix_multiiPf
.globl _Z11matrix_multiiPf
.p2align 8
.type _Z11matrix_multiiPf,@function
_Z11matrix_multiiPf:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x1c
s_load_b128 s[0:3], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
v_cvt_f32_i32_e32 v3, s0
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
s_cmp_lt_i32 s1, 1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_f32_i32_e32 v4, v1
v_div_scale_f32 v0, null, v3, v3, v4
v_div_scale_f32 v6, vcc_lo, v4, v3, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_f32_e32 v5, v0
s_waitcnt_depctr 0xfff
v_fma_f32 v2, -v0, v5, 1.0
v_fmac_f32_e32 v5, v2, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v7, v6, v5
v_fma_f32 v2, -v0, v7, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_fmac_f32_e32 v7, v2, v5
v_ashrrev_i32_e32 v2, 31, v1
v_fma_f32 v0, -v0, v7, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_div_fmas_f32 v5, v0, v5, v7
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_div_fixup_f32 v2, v5, v3, v4
v_add_co_u32 v0, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_store_b32 v[0:1], v2, off
s_cbranch_scc1 .LBB0_4
global_load_b32 v2, v[0:1], off
.LBB0_2:
s_waitcnt vmcnt(0)
v_fmaak_f32 v2, v2, v2, 0xbe800000
s_add_i32 s1, s1, -1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_eq_u32 s1, 0
s_cbranch_scc0 .LBB0_2
global_store_b32 v[0:1], v2, off
.LBB0_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z11matrix_multiiPf
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z11matrix_multiiPf, .Lfunc_end0-_Z11matrix_multiiPf
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z11matrix_multiiPf
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z11matrix_multiiPf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z11matrix_multiiPf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ I2F R4, c[0x0][0x160] ; /* 0x0000580000047b06 */
/* 0x000e620000201400 */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0040*/ BSSY B0, 0x130 ; /* 0x000000e000007945 */
/* 0x000fe20003800000 */
/*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e2a0000002100 */
/*0060*/ MUFU.RCP R5, R4 ; /* 0x0000000400057308 */
/* 0x002e620000001000 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fce00078e0203 */
/*0080*/ I2F R3, R0 ; /* 0x0000000000037306 */
/* 0x000e220000201400 */
/*0090*/ FFMA R2, -R4, R5, 1 ; /* 0x3f80000004027423 */
/* 0x002fc80000000105 */
/*00a0*/ FFMA R2, R5, R2, R5 ; /* 0x0000000205027223 */
/* 0x000fc60000000005 */
/*00b0*/ FCHK P0, R3, R4 ; /* 0x0000000403007302 */
/* 0x001e220000000000 */
/*00c0*/ FFMA R5, R3, R2, RZ ; /* 0x0000000203057223 */
/* 0x000fc800000000ff */
/*00d0*/ FFMA R6, -R4, R5, R3 ; /* 0x0000000504067223 */
/* 0x000fc80000000103 */
/*00e0*/ FFMA R5, R2, R6, R5 ; /* 0x0000000602057223 */
/* 0x000fe20000000005 */
/*00f0*/ @!P0 BRA 0x120 ; /* 0x0000002000008947 */
/* 0x001fea0003800000 */
/*0100*/ MOV R2, 0x120 ; /* 0x0000012000027802 */
/* 0x000fe40000000f00 */
/*0110*/ CALL.REL.NOINC 0x540 ; /* 0x0000042000007944 */
/* 0x000fea0003c00000 */
/*0120*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0130*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff047624 */
/* 0x000fe400078e00ff */
/*0140*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fc600078e00ff */
/*0150*/ ISETP.GE.AND P0, PT, R4, 0x1, PT ; /* 0x000000010400780c */
/* 0x000fe20003f06270 */
/*0160*/ IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fca00078e0203 */
/*0170*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x0001ee000c101904 */
/*0180*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0190*/ IADD3 R0, R4, -0x1, RZ ; /* 0xffffffff04007810 */
/* 0x000fc80007ffe0ff */
/*01a0*/ ISETP.GE.U32.AND P0, PT, R0, 0x3, PT ; /* 0x000000030000780c */
/* 0x000fe40003f06070 */
/*01b0*/ LOP3.LUT R0, R4, 0x3, RZ, 0xc0, !PT ; /* 0x0000000304007812 */
/* 0x000fd600078ec0ff */
/*01c0*/ @!P0 BRA 0x4c0 ; /* 0x000002f000008947 */
/* 0x000fea0003800000 */
/*01d0*/ IADD3 R4, -R0, c[0x0][0x164], RZ ; /* 0x0000590000047a10 */
/* 0x000fc80007ffe1ff */
/*01e0*/ ISETP.GT.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fda0003f04270 */
/*01f0*/ @!P0 BRA 0x450 ; /* 0x0000025000008947 */
/* 0x000fea0003800000 */
/*0200*/ ISETP.GT.AND P1, PT, R4, 0xc, PT ; /* 0x0000000c0400780c */
/* 0x000fe40003f24270 */
/*0210*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*0220*/ @!P1 BRA 0x370 ; /* 0x0000014000009947 */
/* 0x000fea0003800000 */
/*0230*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0240*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x001fe20000000005 */
/*0250*/ IADD3 R4, R4, -0x10, RZ ; /* 0xfffffff004047810 */
/* 0x000fc60007ffe0ff */
/*0260*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fe20000000005 */
/*0270*/ ISETP.GT.AND P1, PT, R4, 0xc, PT ; /* 0x0000000c0400780c */
/* 0x000fc60003f24270 */
/*0280*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*0290*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*02a0*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*02b0*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*02c0*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*02d0*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*02e0*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*02f0*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*0300*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*0310*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*0320*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*0330*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*0340*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*0350*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fe20000000005 */
/*0360*/ @P1 BRA 0x240 ; /* 0xfffffed000001947 */
/* 0x000fea000383ffff */
/*0370*/ ISETP.GT.AND P1, PT, R4, 0x4, PT ; /* 0x000000040400780c */
/* 0x000fda0003f24270 */
/*0380*/ @!P1 BRA 0x430 ; /* 0x000000a000009947 */
/* 0x000fea0003800000 */
/*0390*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x001fe20000000005 */
/*03a0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*03b0*/ IADD3 R4, R4, -0x8, RZ ; /* 0xfffffff804047810 */
/* 0x000fe20007ffe0ff */
/*03c0*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*03d0*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*03e0*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*03f0*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*0400*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*0410*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*0420*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fe40000000005 */
/*0430*/ ISETP.NE.OR P0, PT, R4, RZ, P0 ; /* 0x000000ff0400720c */
/* 0x000fda0000705670 */
/*0440*/ @!P0 BRA 0x4c0 ; /* 0x0000007000008947 */
/* 0x000fea0003800000 */
/*0450*/ IADD3 R4, R4, -0x4, RZ ; /* 0xfffffffc04047810 */
/* 0x000fe20007ffe0ff */
/*0460*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x001fc60000000005 */
/*0470*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fe20003f05270 */
/*0480*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*0490*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*04a0*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x000fc80000000005 */
/*04b0*/ @P0 BRA 0x450 ; /* 0xffffff9000000947 */
/* 0x000fea000383ffff */
/*04c0*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fda0003f05270 */
/*04d0*/ @!P0 BRA 0x520 ; /* 0x0000004000008947 */
/* 0x000fea0003800000 */
/*04e0*/ IADD3 R0, R0, -0x1, RZ ; /* 0xffffffff00007810 */
/* 0x000fe20007ffe0ff */
/*04f0*/ FFMA R5, R5, R5, -0.25 ; /* 0xbe80000005057423 */
/* 0x001fc60000000005 */
/*0500*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fda0003f05270 */
/*0510*/ @P0 BRA 0x4e0 ; /* 0xffffffc000000947 */
/* 0x000fea000383ffff */
/*0520*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0530*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0540*/ SHF.R.U32.HI R6, RZ, 0x17, R4.reuse ; /* 0x00000017ff067819 */
/* 0x100fe20000011604 */
/*0550*/ BSSY B1, 0xba0 ; /* 0x0000064000017945 */
/* 0x000fe20003800000 */
/*0560*/ SHF.R.U32.HI R5, RZ, 0x17, R3.reuse ; /* 0x00000017ff057819 */
/* 0x100fe20000011603 */
/*0570*/ IMAD.MOV.U32 R7, RZ, RZ, R3 ; /* 0x000000ffff077224 */
/* 0x000fe200078e0003 */
/*0580*/ LOP3.LUT R6, R6, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff06067812 */
/* 0x000fe200078ec0ff */
/*0590*/ IMAD.MOV.U32 R8, RZ, RZ, R4 ; /* 0x000000ffff087224 */
/* 0x000fe200078e0004 */
/*05a0*/ LOP3.LUT R5, R5, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff05057812 */
/* 0x000fe400078ec0ff */
/*05b0*/ IADD3 R11, R6, -0x1, RZ ; /* 0xffffffff060b7810 */
/* 0x000fe40007ffe0ff */
/*05c0*/ IADD3 R10, R5, -0x1, RZ ; /* 0xffffffff050a7810 */
/* 0x000fc40007ffe0ff */
/*05d0*/ ISETP.GT.U32.AND P0, PT, R11, 0xfd, PT ; /* 0x000000fd0b00780c */
/* 0x000fc80003f04070 */
/*05e0*/ ISETP.GT.U32.OR P0, PT, R10, 0xfd, P0 ; /* 0x000000fd0a00780c */
/* 0x000fda0000704470 */
/*05f0*/ @!P0 IMAD.MOV.U32 R9, RZ, RZ, RZ ; /* 0x000000ffff098224 */
/* 0x000fe200078e00ff */
/*0600*/ @!P0 BRA 0x780 ; /* 0x0000017000008947 */
/* 0x000fea0003800000 */
/*0610*/ FSETP.GTU.FTZ.AND P0, PT, |R3|, +INF , PT ; /* 0x7f8000000300780b */
/* 0x000fe40003f1c200 */
/*0620*/ FSETP.GTU.FTZ.AND P1, PT, |R4|, +INF , PT ; /* 0x7f8000000400780b */
/* 0x000fc80003f3c200 */
/*0630*/ PLOP3.LUT P0, PT, P0, P1, PT, 0xa8, 0x0 ; /* 0x000000000000781c */
/* 0x000fda0000703570 */
/*0640*/ @P0 BRA 0xb80 ; /* 0x0000053000000947 */
/* 0x000fea0003800000 */
/*0650*/ LOP3.LUT P0, RZ, R8, 0x7fffffff, R7, 0xc8, !PT ; /* 0x7fffffff08ff7812 */
/* 0x000fda000780c807 */
/*0660*/ @!P0 BRA 0xb60 ; /* 0x000004f000008947 */
/* 0x000fea0003800000 */
/*0670*/ FSETP.NEU.FTZ.AND P2, PT, |R3|.reuse, +INF , PT ; /* 0x7f8000000300780b */
/* 0x040fe40003f5d200 */
/*0680*/ FSETP.NEU.FTZ.AND P1, PT, |R4|, +INF , PT ; /* 0x7f8000000400780b */
/* 0x000fe40003f3d200 */
/*0690*/ FSETP.NEU.FTZ.AND P0, PT, |R3|, +INF , PT ; /* 0x7f8000000300780b */
/* 0x000fd60003f1d200 */
/*06a0*/ @!P1 BRA !P2, 0xb60 ; /* 0x000004b000009947 */
/* 0x000fea0005000000 */
/*06b0*/ LOP3.LUT P2, RZ, R7, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff07ff7812 */
/* 0x000fc8000784c0ff */
/*06c0*/ PLOP3.LUT P1, PT, P1, P2, PT, 0x2a, 0x0 ; /* 0x000000000000781c */
/* 0x000fda0000f24572 */
/*06d0*/ @P1 BRA 0xb40 ; /* 0x0000046000001947 */
/* 0x000fea0003800000 */
/*06e0*/ LOP3.LUT P1, RZ, R8, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff08ff7812 */
/* 0x000fc8000782c0ff */
/*06f0*/ PLOP3.LUT P0, PT, P0, P1, PT, 0x2a, 0x0 ; /* 0x000000000000781c */
/* 0x000fda0000702572 */
/*0700*/ @P0 BRA 0xb10 ; /* 0x0000040000000947 */
/* 0x000fea0003800000 */
/*0710*/ ISETP.GE.AND P0, PT, R10, RZ, PT ; /* 0x000000ff0a00720c */
/* 0x000fe40003f06270 */
/*0720*/ ISETP.GE.AND P1, PT, R11, RZ, PT ; /* 0x000000ff0b00720c */
/* 0x000fd60003f26270 */
/*0730*/ @P0 IMAD.MOV.U32 R9, RZ, RZ, RZ ; /* 0x000000ffff090224 */
/* 0x000fe400078e00ff */
/*0740*/ @!P0 IMAD.MOV.U32 R9, RZ, RZ, -0x40 ; /* 0xffffffc0ff098424 */
/* 0x000fe400078e00ff */
/*0750*/ @!P0 FFMA R7, R3, 1.84467440737095516160e+19, RZ ; /* 0x5f80000003078823 */
/* 0x000fe400000000ff */
/*0760*/ @!P1 FFMA R8, R4, 1.84467440737095516160e+19, RZ ; /* 0x5f80000004089823 */
/* 0x000fe200000000ff */
/*0770*/ @!P1 IADD3 R9, R9, 0x40, RZ ; /* 0x0000004009099810 */
/* 0x000fe40007ffe0ff */
/*0780*/ LEA R3, R6, 0xc0800000, 0x17 ; /* 0xc080000006037811 */
/* 0x000fe200078eb8ff */
/*0790*/ BSSY B2, 0xb00 ; /* 0x0000036000027945 */
/* 0x000fe20003800000 */
/*07a0*/ IADD3 R5, R5, -0x7f, RZ ; /* 0xffffff8105057810 */
/* 0x000fc60007ffe0ff */
/*07b0*/ IMAD.IADD R8, R8, 0x1, -R3 ; /* 0x0000000108087824 */
/* 0x000fe200078e0a03 */
/*07c0*/ IADD3 R6, R5.reuse, 0x7f, -R6 ; /* 0x0000007f05067810 */
/* 0x040fe20007ffe806 */
/*07d0*/ IMAD R7, R5, -0x800000, R7 ; /* 0xff80000005077824 */
/* 0x000fe400078e0207 */
/*07e0*/ MUFU.RCP R3, R8 ; /* 0x0000000800037308 */
/* 0x000e220000001000 */
/*07f0*/ FADD.FTZ R4, -R8, -RZ ; /* 0x800000ff08047221 */
/* 0x000fe40000010100 */
/*0800*/ IMAD.IADD R6, R6, 0x1, R9 ; /* 0x0000000106067824 */
/* 0x000fe400078e0209 */
/*0810*/ FFMA R10, R3, R4, 1 ; /* 0x3f800000030a7423 */
/* 0x001fc80000000004 */
/*0820*/ FFMA R12, R3, R10, R3 ; /* 0x0000000a030c7223 */
/* 0x000fc80000000003 */
/*0830*/ FFMA R3, R7, R12, RZ ; /* 0x0000000c07037223 */
/* 0x000fc800000000ff */
/*0840*/ FFMA R10, R4, R3, R7 ; /* 0x00000003040a7223 */
/* 0x000fc80000000007 */
/*0850*/ FFMA R11, R12, R10, R3 ; /* 0x0000000a0c0b7223 */
/* 0x000fc80000000003 */
/*0860*/ FFMA R7, R4, R11, R7 ; /* 0x0000000b04077223 */
/* 0x000fc80000000007 */
/*0870*/ FFMA R3, R12, R7, R11 ; /* 0x000000070c037223 */
/* 0x000fca000000000b */
/*0880*/ SHF.R.U32.HI R4, RZ, 0x17, R3 ; /* 0x00000017ff047819 */
/* 0x000fc80000011603 */
/*0890*/ LOP3.LUT R4, R4, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff04047812 */
/* 0x000fca00078ec0ff */
/*08a0*/ IMAD.IADD R8, R4, 0x1, R6 ; /* 0x0000000104087824 */
/* 0x000fca00078e0206 */
/*08b0*/ IADD3 R4, R8, -0x1, RZ ; /* 0xffffffff08047810 */
/* 0x000fc80007ffe0ff */
/*08c0*/ ISETP.GE.U32.AND P0, PT, R4, 0xfe, PT ; /* 0x000000fe0400780c */
/* 0x000fda0003f06070 */
/*08d0*/ @!P0 BRA 0xae0 ; /* 0x0000020000008947 */
/* 0x000fea0003800000 */
/*08e0*/ ISETP.GT.AND P0, PT, R8, 0xfe, PT ; /* 0x000000fe0800780c */
/* 0x000fda0003f04270 */
/*08f0*/ @P0 BRA 0xab0 ; /* 0x000001b000000947 */
/* 0x000fea0003800000 */
/*0900*/ ISETP.GE.AND P0, PT, R8, 0x1, PT ; /* 0x000000010800780c */
/* 0x000fda0003f06270 */
/*0910*/ @P0 BRA 0xaf0 ; /* 0x000001d000000947 */
/* 0x000fea0003800000 */
/*0920*/ ISETP.GE.AND P0, PT, R8, -0x18, PT ; /* 0xffffffe80800780c */
/* 0x000fe40003f06270 */
/*0930*/ LOP3.LUT R3, R3, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000003037812 */
/* 0x000fd600078ec0ff */
/*0940*/ @!P0 BRA 0xaf0 ; /* 0x000001a000008947 */
/* 0x000fea0003800000 */
/*0950*/ FFMA.RZ R4, R12, R7.reuse, R11.reuse ; /* 0x000000070c047223 */
/* 0x180fe2000000c00b */
/*0960*/ ISETP.NE.AND P2, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe20003f45270 */
/*0970*/ FFMA.RM R5, R12, R7.reuse, R11.reuse ; /* 0x000000070c057223 */
/* 0x180fe2000000400b */
/*0980*/ ISETP.NE.AND P1, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe40003f25270 */
/*0990*/ LOP3.LUT R6, R4, 0x7fffff, RZ, 0xc0, !PT ; /* 0x007fffff04067812 */
/* 0x000fe200078ec0ff */
/*09a0*/ FFMA.RP R4, R12, R7, R11 ; /* 0x000000070c047223 */
/* 0x000fe2000000800b */
/*09b0*/ IADD3 R7, R8, 0x20, RZ ; /* 0x0000002008077810 */
/* 0x000fe20007ffe0ff */
/*09c0*/ IMAD.MOV R8, RZ, RZ, -R8 ; /* 0x000000ffff087224 */
/* 0x000fe200078e0a08 */
/*09d0*/ LOP3.LUT R6, R6, 0x800000, RZ, 0xfc, !PT ; /* 0x0080000006067812 */
/* 0x000fe400078efcff */
/*09e0*/ FSETP.NEU.FTZ.AND P0, PT, R4, R5, PT ; /* 0x000000050400720b */
/* 0x000fc40003f1d000 */
/*09f0*/ SHF.L.U32 R7, R6, R7, RZ ; /* 0x0000000706077219 */
/* 0x000fe400000006ff */
/*0a00*/ SEL R5, R8, RZ, P2 ; /* 0x000000ff08057207 */
/* 0x000fe40001000000 */
/*0a10*/ ISETP.NE.AND P1, PT, R7, RZ, P1 ; /* 0x000000ff0700720c */
/* 0x000fe40000f25270 */
/*0a20*/ SHF.R.U32.HI R5, RZ, R5, R6 ; /* 0x00000005ff057219 */
/* 0x000fe40000011606 */
/*0a30*/ PLOP3.LUT P0, PT, P0, P1, PT, 0xa8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40000703570 */
/*0a40*/ SHF.R.U32.HI R7, RZ, 0x1, R5 ; /* 0x00000001ff077819 */
/* 0x000fc40000011605 */
/*0a50*/ SEL R4, RZ, 0x1, !P0 ; /* 0x00000001ff047807 */
/* 0x000fc80004000000 */
/*0a60*/ LOP3.LUT R4, R4, 0x1, R7, 0xf8, !PT ; /* 0x0000000104047812 */
/* 0x000fc800078ef807 */
/*0a70*/ LOP3.LUT R4, R4, R5, RZ, 0xc0, !PT ; /* 0x0000000504047212 */
/* 0x000fca00078ec0ff */
/*0a80*/ IMAD.IADD R4, R7, 0x1, R4 ; /* 0x0000000107047824 */
/* 0x000fca00078e0204 */
/*0a90*/ LOP3.LUT R3, R4, R3, RZ, 0xfc, !PT ; /* 0x0000000304037212 */
/* 0x000fe200078efcff */
/*0aa0*/ BRA 0xaf0 ; /* 0x0000004000007947 */
/* 0x000fea0003800000 */
/*0ab0*/ LOP3.LUT R3, R3, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000003037812 */
/* 0x000fc800078ec0ff */
/*0ac0*/ LOP3.LUT R3, R3, 0x7f800000, RZ, 0xfc, !PT ; /* 0x7f80000003037812 */
/* 0x000fe200078efcff */
/*0ad0*/ BRA 0xaf0 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*0ae0*/ IMAD R3, R6, 0x800000, R3 ; /* 0x0080000006037824 */
/* 0x000fe400078e0203 */
/*0af0*/ BSYNC B2 ; /* 0x0000000000027941 */
/* 0x000fea0003800000 */
/*0b00*/ BRA 0xb90 ; /* 0x0000008000007947 */
/* 0x000fea0003800000 */
/*0b10*/ LOP3.LUT R3, R8, 0x80000000, R7, 0x48, !PT ; /* 0x8000000008037812 */
/* 0x000fc800078e4807 */
/*0b20*/ LOP3.LUT R3, R3, 0x7f800000, RZ, 0xfc, !PT ; /* 0x7f80000003037812 */
/* 0x000fe200078efcff */
/*0b30*/ BRA 0xb90 ; /* 0x0000005000007947 */
/* 0x000fea0003800000 */
/*0b40*/ LOP3.LUT R3, R8, 0x80000000, R7, 0x48, !PT ; /* 0x8000000008037812 */
/* 0x000fe200078e4807 */
/*0b50*/ BRA 0xb90 ; /* 0x0000003000007947 */
/* 0x000fea0003800000 */
/*0b60*/ MUFU.RSQ R3, -QNAN ; /* 0xffc0000000037908 */
/* 0x000e220000001400 */
/*0b70*/ BRA 0xb90 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*0b80*/ FADD.FTZ R3, R3, R4 ; /* 0x0000000403037221 */
/* 0x000fe40000010000 */
/*0b90*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0ba0*/ IMAD.MOV.U32 R5, RZ, RZ, R3 ; /* 0x000000ffff057224 */
/* 0x001fe400078e0003 */
/*0bb0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x0 ; /* 0x00000000ff037424 */
/* 0x000fc800078e00ff */
/*0bc0*/ RET.REL.NODEC R2 0x0 ; /* 0xfffff43002007950 */
/* 0x000fea0003c3ffff */
/*0bd0*/ BRA 0xbd0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0be0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0bf0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c00*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c10*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z11matrix_multiiPf
.globl _Z11matrix_multiiPf
.p2align 8
.type _Z11matrix_multiiPf,@function
_Z11matrix_multiiPf:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x1c
s_load_b128 s[0:3], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
v_cvt_f32_i32_e32 v3, s0
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
s_cmp_lt_i32 s1, 1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_f32_i32_e32 v4, v1
v_div_scale_f32 v0, null, v3, v3, v4
v_div_scale_f32 v6, vcc_lo, v4, v3, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_f32_e32 v5, v0
s_waitcnt_depctr 0xfff
v_fma_f32 v2, -v0, v5, 1.0
v_fmac_f32_e32 v5, v2, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v7, v6, v5
v_fma_f32 v2, -v0, v7, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_fmac_f32_e32 v7, v2, v5
v_ashrrev_i32_e32 v2, 31, v1
v_fma_f32 v0, -v0, v7, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_div_fmas_f32 v5, v0, v5, v7
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_div_fixup_f32 v2, v5, v3, v4
v_add_co_u32 v0, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_store_b32 v[0:1], v2, off
s_cbranch_scc1 .LBB0_4
global_load_b32 v2, v[0:1], off
.LBB0_2:
s_waitcnt vmcnt(0)
v_fmaak_f32 v2, v2, v2, 0xbe800000
s_add_i32 s1, s1, -1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_eq_u32 s1, 0
s_cbranch_scc0 .LBB0_2
global_store_b32 v[0:1], v2, off
.LBB0_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z11matrix_multiiPf
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z11matrix_multiiPf, .Lfunc_end0-_Z11matrix_multiiPf
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z11matrix_multiiPf
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z11matrix_multiiPf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void topBoundaryKernel(double* temperature, int block_size) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < block_size) {
temperature[1 + i] = 1.0;
}
} | code for sm_80
Function : _Z17topBoundaryKernelPdi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x168], PT ; /* 0x00005a0000007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ IADD3 R2, R0, 0x1, RZ ; /* 0x0000000100027810 */
/* 0x000fe20007ffe0ff */
/*0070*/ IMAD.MOV.U32 R3, RZ, RZ, 0x8 ; /* 0x00000008ff037424 */
/* 0x000fe200078e00ff */
/*0080*/ HFMA2.MMA R4, -RZ, RZ, 0, 0 ; /* 0x00000000ff047435 */
/* 0x000fe200000001ff */
/*0090*/ MOV R5, 0x3ff00000 ; /* 0x3ff0000000057802 */
/* 0x000fe20000000f00 */
/*00a0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00b0*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fca00078e0203 */
/*00c0*/ STG.E.64 [R2.64], R4 ; /* 0x0000000402007986 */
/* 0x000fe2000c101b04 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void topBoundaryKernel(double* temperature, int block_size) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < block_size) {
temperature[1 + i] = 1.0;
}
} | .file "tmpxft_000a471f_00000000-6_topBoundaryKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z38__device_stub__Z17topBoundaryKernelPdiPdi
.type _Z38__device_stub__Z17topBoundaryKernelPdiPdi, @function
_Z38__device_stub__Z17topBoundaryKernelPdiPdi:
.LFB2051:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z17topBoundaryKernelPdi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z38__device_stub__Z17topBoundaryKernelPdiPdi, .-_Z38__device_stub__Z17topBoundaryKernelPdiPdi
.globl _Z17topBoundaryKernelPdi
.type _Z17topBoundaryKernelPdi, @function
_Z17topBoundaryKernelPdi:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z17topBoundaryKernelPdiPdi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z17topBoundaryKernelPdi, .-_Z17topBoundaryKernelPdi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z17topBoundaryKernelPdi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z17topBoundaryKernelPdi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void topBoundaryKernel(double* temperature, int block_size) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < block_size) {
temperature[1 + i] = 1.0;
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void topBoundaryKernel(double* temperature, int block_size) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < block_size) {
temperature[1 + i] = 1.0;
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void topBoundaryKernel(double* temperature, int block_size) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < block_size) {
temperature[1 + i] = 1.0;
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z17topBoundaryKernelPdi
.globl _Z17topBoundaryKernelPdi
.p2align 8
.type _Z17topBoundaryKernelPdi,@function
_Z17topBoundaryKernelPdi:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x1c
s_load_b32 s3, s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b64 s[0:1], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
v_mov_b32_e32 v3, 0x3ff00000
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[0:1], 3, v[1:2]
v_mov_b32_e32 v2, 0
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b64 v[0:1], v[2:3], off offset:8
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z17topBoundaryKernelPdi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z17topBoundaryKernelPdi, .Lfunc_end0-_Z17topBoundaryKernelPdi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z17topBoundaryKernelPdi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z17topBoundaryKernelPdi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void topBoundaryKernel(double* temperature, int block_size) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < block_size) {
temperature[1 + i] = 1.0;
}
} | .text
.file "topBoundaryKernel.hip"
.globl _Z32__device_stub__topBoundaryKernelPdi # -- Begin function _Z32__device_stub__topBoundaryKernelPdi
.p2align 4, 0x90
.type _Z32__device_stub__topBoundaryKernelPdi,@function
_Z32__device_stub__topBoundaryKernelPdi: # @_Z32__device_stub__topBoundaryKernelPdi
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z17topBoundaryKernelPdi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z32__device_stub__topBoundaryKernelPdi, .Lfunc_end0-_Z32__device_stub__topBoundaryKernelPdi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z17topBoundaryKernelPdi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z17topBoundaryKernelPdi,@object # @_Z17topBoundaryKernelPdi
.section .rodata,"a",@progbits
.globl _Z17topBoundaryKernelPdi
.p2align 3, 0x0
_Z17topBoundaryKernelPdi:
.quad _Z32__device_stub__topBoundaryKernelPdi
.size _Z17topBoundaryKernelPdi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z17topBoundaryKernelPdi"
.size .L__unnamed_1, 25
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z32__device_stub__topBoundaryKernelPdi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z17topBoundaryKernelPdi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z17topBoundaryKernelPdi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x168], PT ; /* 0x00005a0000007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ IADD3 R2, R0, 0x1, RZ ; /* 0x0000000100027810 */
/* 0x000fe20007ffe0ff */
/*0070*/ IMAD.MOV.U32 R3, RZ, RZ, 0x8 ; /* 0x00000008ff037424 */
/* 0x000fe200078e00ff */
/*0080*/ HFMA2.MMA R4, -RZ, RZ, 0, 0 ; /* 0x00000000ff047435 */
/* 0x000fe200000001ff */
/*0090*/ MOV R5, 0x3ff00000 ; /* 0x3ff0000000057802 */
/* 0x000fe20000000f00 */
/*00a0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00b0*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fca00078e0203 */
/*00c0*/ STG.E.64 [R2.64], R4 ; /* 0x0000000402007986 */
/* 0x000fe2000c101b04 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z17topBoundaryKernelPdi
.globl _Z17topBoundaryKernelPdi
.p2align 8
.type _Z17topBoundaryKernelPdi,@function
_Z17topBoundaryKernelPdi:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x1c
s_load_b32 s3, s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b64 s[0:1], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
v_mov_b32_e32 v3, 0x3ff00000
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[0:1], 3, v[1:2]
v_mov_b32_e32 v2, 0
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b64 v[0:1], v[2:3], off offset:8
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z17topBoundaryKernelPdi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z17topBoundaryKernelPdi, .Lfunc_end0-_Z17topBoundaryKernelPdi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z17topBoundaryKernelPdi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z17topBoundaryKernelPdi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000a471f_00000000-6_topBoundaryKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z38__device_stub__Z17topBoundaryKernelPdiPdi
.type _Z38__device_stub__Z17topBoundaryKernelPdiPdi, @function
_Z38__device_stub__Z17topBoundaryKernelPdiPdi:
.LFB2051:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z17topBoundaryKernelPdi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z38__device_stub__Z17topBoundaryKernelPdiPdi, .-_Z38__device_stub__Z17topBoundaryKernelPdiPdi
.globl _Z17topBoundaryKernelPdi
.type _Z17topBoundaryKernelPdi, @function
_Z17topBoundaryKernelPdi:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z17topBoundaryKernelPdiPdi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z17topBoundaryKernelPdi, .-_Z17topBoundaryKernelPdi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z17topBoundaryKernelPdi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z17topBoundaryKernelPdi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "topBoundaryKernel.hip"
.globl _Z32__device_stub__topBoundaryKernelPdi # -- Begin function _Z32__device_stub__topBoundaryKernelPdi
.p2align 4, 0x90
.type _Z32__device_stub__topBoundaryKernelPdi,@function
_Z32__device_stub__topBoundaryKernelPdi: # @_Z32__device_stub__topBoundaryKernelPdi
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z17topBoundaryKernelPdi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z32__device_stub__topBoundaryKernelPdi, .Lfunc_end0-_Z32__device_stub__topBoundaryKernelPdi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z17topBoundaryKernelPdi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z17topBoundaryKernelPdi,@object # @_Z17topBoundaryKernelPdi
.section .rodata,"a",@progbits
.globl _Z17topBoundaryKernelPdi
.p2align 3, 0x0
_Z17topBoundaryKernelPdi:
.quad _Z32__device_stub__topBoundaryKernelPdi
.size _Z17topBoundaryKernelPdi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z17topBoundaryKernelPdi"
.size .L__unnamed_1, 25
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z32__device_stub__topBoundaryKernelPdi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z17topBoundaryKernelPdi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <stdlib.h>
__global__ void add( int *a, int *b, int *c, int vector_size ) {
// Calculate the index in the vector for the thread using the internal variables
int tid = blockIdx.x * blockDim.x + threadIdx.x; // HERE
// This if statement is added in case we have more threads executing
// Than number of elements in the vectors. How can this help?
if (tid < vector_size){
// Compute the addition
// HERE
c[tid] = a[tid] + b[tid];
}
}
int main( int argc, char* argv[] ) {
// Parse Input arguments
// Check the number of arguments
if (argc != 3) {
// Tell the user how to run the program
printf ("Usage: %s vector_size block_size\n", argv[0]);
// "Usage messages" are a conventional way of telling the user
// how to run a program if they enter the command incorrectly.
return 1;
}
// Set GPU Variables based on input arguments
int vector_size = atoi(argv[1]);
int block_size = atoi(argv[2]);
int grid_size = ((vector_size-1)/block_size) + 1;
// Set device that we will use for our cuda code
cudaSetDevice(0);
// Time Variables
cudaEvent_t start, stop;
float time;
cudaEventCreate (&start);
cudaEventCreate (&stop);
// Input Arrays and variables
int *a = new int [vector_size];
int *b = new int [vector_size];
int *c_cpu = new int [vector_size];
int *c_gpu = new int [vector_size];
// Pointers in GPU memory
int *dev_a;
int *dev_b;
int *dev_c;
// fill the arrays 'a' and 'b' on the CPU
printf("Initializing input arrays.\n");
for (int i = 0; i < vector_size; i++) {
a[i] = rand()%10;
b[i] = rand()%10;
}
//
// CPU Calculation
//////////////////
printf("Running sequential job.\n");
cudaEventRecord(start,0);
// Calculate C in the CPU
for (int i = 0; i < vector_size; i++) {
c_cpu[i] = a[i] + b[i];
}
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("\tSequential Job Time: %.2f ms\n", time);
// allocate the memory on the GPU
// HERE
cudaMalloc((void**)&dev_a, sizeof(int) * vector_size);
cudaMalloc((void**)&dev_b, sizeof(int) * vector_size);
cudaMalloc((void**)&dev_c, sizeof(int) * vector_size);
// copy the arrays 'a' and 'b' to the GPU
// HERE
cudaMemcpy(dev_a, a, sizeof(int) *vector_size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, sizeof(int) *vector_size, cudaMemcpyHostToDevice);
//
// GPU Calculation
////////////////////////
printf("Running parallel job.\n");
cudaEventRecord(start,0);
// call the kernel
// HERE
dim3 dimGrid(grid_size);
dim3 dimBlock(block_size);
add<<<dimGrid, dimBlock>>>(dev_a, dev_b, dev_c, vector_size);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("\tParallel Job Time: %.2f ms\n", time);
// copy the array 'c' back from the GPU to the CPU
// HERE (there's one more at the end, don't miss it!)
cudaMemcpy(c_gpu, dev_c, sizeof(int) * vector_size, cudaMemcpyDeviceToHost);
// compare the results
int error = 0;
for (int i = 0; i < vector_size; i++) {
if (c_cpu[i] != c_gpu[i]){
error = 1;
printf( "Error starting element %d, %d != %d\n", i, c_gpu[i], c_cpu[i] );
}
if (error) break;
}
if (error == 0){
printf ("Correct result. No errors were found.\n");
}
// free CPU data
free (a);
free (b);
free (c_cpu);
free (c_gpu);
// free the memory allocated on the GPU
// HERE
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
} | code for sm_80
Function : _Z3addPiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*00d0*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */
/* 0x004fca0007ffe0ff */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
__global__ void add( int *a, int *b, int *c, int vector_size ) {
// Calculate the index in the vector for the thread using the internal variables
int tid = blockIdx.x * blockDim.x + threadIdx.x; // HERE
// This if statement is added in case we have more threads executing
// Than number of elements in the vectors. How can this help?
if (tid < vector_size){
// Compute the addition
// HERE
c[tid] = a[tid] + b[tid];
}
}
int main( int argc, char* argv[] ) {
// Parse Input arguments
// Check the number of arguments
if (argc != 3) {
// Tell the user how to run the program
printf ("Usage: %s vector_size block_size\n", argv[0]);
// "Usage messages" are a conventional way of telling the user
// how to run a program if they enter the command incorrectly.
return 1;
}
// Set GPU Variables based on input arguments
int vector_size = atoi(argv[1]);
int block_size = atoi(argv[2]);
int grid_size = ((vector_size-1)/block_size) + 1;
// Set device that we will use for our cuda code
cudaSetDevice(0);
// Time Variables
cudaEvent_t start, stop;
float time;
cudaEventCreate (&start);
cudaEventCreate (&stop);
// Input Arrays and variables
int *a = new int [vector_size];
int *b = new int [vector_size];
int *c_cpu = new int [vector_size];
int *c_gpu = new int [vector_size];
// Pointers in GPU memory
int *dev_a;
int *dev_b;
int *dev_c;
// fill the arrays 'a' and 'b' on the CPU
printf("Initializing input arrays.\n");
for (int i = 0; i < vector_size; i++) {
a[i] = rand()%10;
b[i] = rand()%10;
}
//
// CPU Calculation
//////////////////
printf("Running sequential job.\n");
cudaEventRecord(start,0);
// Calculate C in the CPU
for (int i = 0; i < vector_size; i++) {
c_cpu[i] = a[i] + b[i];
}
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("\tSequential Job Time: %.2f ms\n", time);
// allocate the memory on the GPU
// HERE
cudaMalloc((void**)&dev_a, sizeof(int) * vector_size);
cudaMalloc((void**)&dev_b, sizeof(int) * vector_size);
cudaMalloc((void**)&dev_c, sizeof(int) * vector_size);
// copy the arrays 'a' and 'b' to the GPU
// HERE
cudaMemcpy(dev_a, a, sizeof(int) *vector_size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, sizeof(int) *vector_size, cudaMemcpyHostToDevice);
//
// GPU Calculation
////////////////////////
printf("Running parallel job.\n");
cudaEventRecord(start,0);
// call the kernel
// HERE
dim3 dimGrid(grid_size);
dim3 dimBlock(block_size);
add<<<dimGrid, dimBlock>>>(dev_a, dev_b, dev_c, vector_size);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("\tParallel Job Time: %.2f ms\n", time);
// copy the array 'c' back from the GPU to the CPU
// HERE (there's one more at the end, don't miss it!)
cudaMemcpy(c_gpu, dev_c, sizeof(int) * vector_size, cudaMemcpyDeviceToHost);
// compare the results
int error = 0;
for (int i = 0; i < vector_size; i++) {
if (c_cpu[i] != c_gpu[i]){
error = 1;
printf( "Error starting element %d, %d != %d\n", i, c_gpu[i], c_cpu[i] );
}
if (error) break;
}
if (error == 0){
printf ("Correct result. No errors were found.\n");
}
// free CPU data
free (a);
free (b);
free (c_cpu);
free (c_gpu);
// free the memory allocated on the GPU
// HERE
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
} | .file "tmpxft_000d63ad_00000000-6_VectorAdd.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z27__device_stub__Z3addPiS_S_iPiS_S_i
.type _Z27__device_stub__Z3addPiS_S_iPiS_S_i, @function
_Z27__device_stub__Z3addPiS_S_iPiS_S_i:
.LFB2082:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z27__device_stub__Z3addPiS_S_iPiS_S_i, .-_Z27__device_stub__Z3addPiS_S_iPiS_S_i
.globl _Z3addPiS_S_i
.type _Z3addPiS_S_i, @function
_Z3addPiS_S_i:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z3addPiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z3addPiS_S_i, .-_Z3addPiS_S_i
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "Usage: %s vector_size block_size\n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "Initializing input arrays.\n"
.LC2:
.string "Running sequential job.\n"
.section .rodata.str1.8
.align 8
.LC3:
.string "\tSequential Job Time: %.2f ms\n"
.section .rodata.str1.1
.LC4:
.string "Running parallel job.\n"
.LC5:
.string "\tParallel Job Time: %.2f ms\n"
.section .rodata.str1.8
.align 8
.LC6:
.string "Error starting element %d, %d != %d\n"
.align 8
.LC7:
.string "Correct result. No errors were found.\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $120, %rsp
.cfi_def_cfa_offset 176
movq %rsi, %rbx
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
cmpl $3, %edi
je .L12
movq (%rsi), %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
movl $1, %eax
.L11:
movq 104(%rsp), %rdx
subq %fs:40, %rdx
jne .L32
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L12:
.cfi_restore_state
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r15
movq %rax, 8(%rsp)
movl %eax, 28(%rsp)
movq 16(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %rsi
movq %rax, 16(%rsp)
movl %r15d, %eax
subl $1, %eax
cltd
idivl %esi
addl $1, %eax
movl %eax, 24(%rsp)
movl $0, %edi
call cudaSetDevice@PLT
leaq 40(%rsp), %rdi
call cudaEventCreate@PLT
leaq 48(%rsp), %rdi
call cudaEventCreate@PLT
movslq %r15d, %rax
movabsq $2305843009213693950, %rdx
cmpq %rax, %rdx
jb .L14
leaq 0(,%rax,4), %rbx
movq %rbx, (%rsp)
movq %rbx, %rdi
call _Znam@PLT
movq %rax, %r12
movq %rbx, %rdi
call _Znam@PLT
movq %rax, %r13
movq %rbx, %rdi
call _Znam@PLT
movq %rax, %rbp
movq %rbx, %rdi
call _Znam@PLT
movq %rax, %r14
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
testl %r15d, %r15d
jle .L33
movq 8(%rsp), %rax
leal -1(%rax), %r15d
movl $0, %ebx
.L18:
call rand@PLT
movslq %eax, %rdx
imulq $1717986919, %rdx, %rdx
sarq $34, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,4), %edx
addl %edx, %edx
subl %edx, %eax
movl %eax, (%r12,%rbx,4)
call rand@PLT
movslq %eax, %rdx
imulq $1717986919, %rdx, %rdx
sarq $34, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,4), %edx
addl %edx, %edx
subl %edx, %eax
movl %eax, 0(%r13,%rbx,4)
movq %rbx, %rax
addq $1, %rbx
cmpq %r15, %rax
jne .L18
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movl $0, %eax
.L19:
movl 0(%r13,%rax,4), %edx
addl (%r12,%rax,4), %edx
movl %edx, 0(%rbp,%rax,4)
movq %rax, %rdx
addq $1, %rax
cmpq %r15, %rdx
jne .L19
.L25:
movl $0, %esi
movq 48(%rsp), %rdi
call cudaEventRecord@PLT
movq 48(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 36(%rsp), %rdi
movq 48(%rsp), %rdx
movq 40(%rsp), %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 36(%rsp), %xmm0
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
leaq 56(%rsp), %rdi
movq (%rsp), %rbx
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 64(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 72(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r12, %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r13, %rsi
movq 64(%rsp), %rdi
call cudaMemcpy@PLT
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movl 24(%rsp), %eax
movl %eax, 80(%rsp)
movl $1, 84(%rsp)
movl 16(%rsp), %eax
movl %eax, 92(%rsp)
movl $1, 96(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 92(%rsp), %rdx
movl $1, %ecx
movq 80(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L34
.L20:
movl $0, %esi
movq 48(%rsp), %rdi
call cudaEventRecord@PLT
movq 48(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 36(%rsp), %rdi
movq 48(%rsp), %rdx
movq 40(%rsp), %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 36(%rsp), %xmm0
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $2, %ecx
movq (%rsp), %rdx
movq 72(%rsp), %rsi
movq %r14, %rdi
call cudaMemcpy@PLT
movq 8(%rsp), %rax
testl %eax, %eax
jle .L21
leal -1(%rax), %esi
movl $0, %edx
jmp .L24
.L14:
movq 104(%rsp), %rax
subq %fs:40, %rax
je .L17
call __stack_chk_fail@PLT
.L17:
call __cxa_throw_bad_array_new_length@PLT
.L34:
movl 28(%rsp), %ecx
movq 72(%rsp), %rdx
movq 64(%rsp), %rsi
movq 56(%rsp), %rdi
call _Z27__device_stub__Z3addPiS_S_iPiS_S_i
jmp .L20
.L27:
movq %rax, %rdx
.L24:
movl 0(%rbp,%rdx,4), %r8d
movl (%r14,%rdx,4), %ecx
cmpl %ecx, %r8d
jne .L35
leaq 1(%rdx), %rax
cmpq %rsi, %rdx
jne .L27
.L21:
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L23
.L35:
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L23:
movq %r12, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq %r14, %rdi
call free@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq 64(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rdi
call cudaFree@PLT
movl $0, %eax
jmp .L11
.L33:
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
jmp .L25
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC8:
.string "_Z3addPiS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addPiS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
__global__ void add( int *a, int *b, int *c, int vector_size ) {
// Calculate the index in the vector for the thread using the internal variables
int tid = blockIdx.x * blockDim.x + threadIdx.x; // HERE
// This if statement is added in case we have more threads executing
// Than number of elements in the vectors. How can this help?
if (tid < vector_size){
// Compute the addition
// HERE
c[tid] = a[tid] + b[tid];
}
}
int main( int argc, char* argv[] ) {
// Parse Input arguments
// Check the number of arguments
if (argc != 3) {
// Tell the user how to run the program
printf ("Usage: %s vector_size block_size\n", argv[0]);
// "Usage messages" are a conventional way of telling the user
// how to run a program if they enter the command incorrectly.
return 1;
}
// Set GPU Variables based on input arguments
int vector_size = atoi(argv[1]);
int block_size = atoi(argv[2]);
int grid_size = ((vector_size-1)/block_size) + 1;
// Set device that we will use for our cuda code
cudaSetDevice(0);
// Time Variables
cudaEvent_t start, stop;
float time;
cudaEventCreate (&start);
cudaEventCreate (&stop);
// Input Arrays and variables
int *a = new int [vector_size];
int *b = new int [vector_size];
int *c_cpu = new int [vector_size];
int *c_gpu = new int [vector_size];
// Pointers in GPU memory
int *dev_a;
int *dev_b;
int *dev_c;
// fill the arrays 'a' and 'b' on the CPU
printf("Initializing input arrays.\n");
for (int i = 0; i < vector_size; i++) {
a[i] = rand()%10;
b[i] = rand()%10;
}
//
// CPU Calculation
//////////////////
printf("Running sequential job.\n");
cudaEventRecord(start,0);
// Calculate C in the CPU
for (int i = 0; i < vector_size; i++) {
c_cpu[i] = a[i] + b[i];
}
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("\tSequential Job Time: %.2f ms\n", time);
// allocate the memory on the GPU
// HERE
cudaMalloc((void**)&dev_a, sizeof(int) * vector_size);
cudaMalloc((void**)&dev_b, sizeof(int) * vector_size);
cudaMalloc((void**)&dev_c, sizeof(int) * vector_size);
// copy the arrays 'a' and 'b' to the GPU
// HERE
cudaMemcpy(dev_a, a, sizeof(int) *vector_size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, sizeof(int) *vector_size, cudaMemcpyHostToDevice);
//
// GPU Calculation
////////////////////////
printf("Running parallel job.\n");
cudaEventRecord(start,0);
// call the kernel
// HERE
dim3 dimGrid(grid_size);
dim3 dimBlock(block_size);
add<<<dimGrid, dimBlock>>>(dev_a, dev_b, dev_c, vector_size);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("\tParallel Job Time: %.2f ms\n", time);
// copy the array 'c' back from the GPU to the CPU
// HERE (there's one more at the end, don't miss it!)
cudaMemcpy(c_gpu, dev_c, sizeof(int) * vector_size, cudaMemcpyDeviceToHost);
// compare the results
int error = 0;
for (int i = 0; i < vector_size; i++) {
if (c_cpu[i] != c_gpu[i]){
error = 1;
printf( "Error starting element %d, %d != %d\n", i, c_gpu[i], c_cpu[i] );
}
if (error) break;
}
if (error == 0){
printf ("Correct result. No errors were found.\n");
}
// free CPU data
free (a);
free (b);
free (c_cpu);
free (c_gpu);
// free the memory allocated on the GPU
// HERE
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
__global__ void add( int *a, int *b, int *c, int vector_size ) {
// Calculate the index in the vector for the thread using the internal variables
int tid = blockIdx.x * blockDim.x + threadIdx.x; // HERE
// This if statement is added in case we have more threads executing
// Than number of elements in the vectors. How can this help?
if (tid < vector_size){
// Compute the addition
// HERE
c[tid] = a[tid] + b[tid];
}
}
int main( int argc, char* argv[] ) {
// Parse Input arguments
// Check the number of arguments
if (argc != 3) {
// Tell the user how to run the program
printf ("Usage: %s vector_size block_size\n", argv[0]);
// "Usage messages" are a conventional way of telling the user
// how to run a program if they enter the command incorrectly.
return 1;
}
// Set GPU Variables based on input arguments
int vector_size = atoi(argv[1]);
int block_size = atoi(argv[2]);
int grid_size = ((vector_size-1)/block_size) + 1;
// Set device that we will use for our cuda code
hipSetDevice(0);
// Time Variables
hipEvent_t start, stop;
float time;
hipEventCreate (&start);
hipEventCreate (&stop);
// Input Arrays and variables
int *a = new int [vector_size];
int *b = new int [vector_size];
int *c_cpu = new int [vector_size];
int *c_gpu = new int [vector_size];
// Pointers in GPU memory
int *dev_a;
int *dev_b;
int *dev_c;
// fill the arrays 'a' and 'b' on the CPU
printf("Initializing input arrays.\n");
for (int i = 0; i < vector_size; i++) {
a[i] = rand()%10;
b[i] = rand()%10;
}
//
// CPU Calculation
//////////////////
printf("Running sequential job.\n");
hipEventRecord(start,0);
// Calculate C in the CPU
for (int i = 0; i < vector_size; i++) {
c_cpu[i] = a[i] + b[i];
}
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
printf("\tSequential Job Time: %.2f ms\n", time);
// allocate the memory on the GPU
// HERE
hipMalloc((void**)&dev_a, sizeof(int) * vector_size);
hipMalloc((void**)&dev_b, sizeof(int) * vector_size);
hipMalloc((void**)&dev_c, sizeof(int) * vector_size);
// copy the arrays 'a' and 'b' to the GPU
// HERE
hipMemcpy(dev_a, a, sizeof(int) *vector_size, hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, sizeof(int) *vector_size, hipMemcpyHostToDevice);
//
// GPU Calculation
////////////////////////
printf("Running parallel job.\n");
hipEventRecord(start,0);
// call the kernel
// HERE
dim3 dimGrid(grid_size);
dim3 dimBlock(block_size);
add<<<dimGrid, dimBlock>>>(dev_a, dev_b, dev_c, vector_size);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
printf("\tParallel Job Time: %.2f ms\n", time);
// copy the array 'c' back from the GPU to the CPU
// HERE (there's one more at the end, don't miss it!)
hipMemcpy(c_gpu, dev_c, sizeof(int) * vector_size, hipMemcpyDeviceToHost);
// compare the results
int error = 0;
for (int i = 0; i < vector_size; i++) {
if (c_cpu[i] != c_gpu[i]){
error = 1;
printf( "Error starting element %d, %d != %d\n", i, c_gpu[i], c_cpu[i] );
}
if (error) break;
}
if (error == 0){
printf ("Correct result. No errors were found.\n");
}
// free CPU data
free (a);
free (b);
free (c_cpu);
free (c_gpu);
// free the memory allocated on the GPU
// HERE
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
__global__ void add( int *a, int *b, int *c, int vector_size ) {
// Calculate the index in the vector for the thread using the internal variables
int tid = blockIdx.x * blockDim.x + threadIdx.x; // HERE
// This if statement is added in case we have more threads executing
// Than number of elements in the vectors. How can this help?
if (tid < vector_size){
// Compute the addition
// HERE
c[tid] = a[tid] + b[tid];
}
}
int main( int argc, char* argv[] ) {
// Parse Input arguments
// Check the number of arguments
if (argc != 3) {
// Tell the user how to run the program
printf ("Usage: %s vector_size block_size\n", argv[0]);
// "Usage messages" are a conventional way of telling the user
// how to run a program if they enter the command incorrectly.
return 1;
}
// Set GPU Variables based on input arguments
int vector_size = atoi(argv[1]);
int block_size = atoi(argv[2]);
int grid_size = ((vector_size-1)/block_size) + 1;
// Set device that we will use for our cuda code
hipSetDevice(0);
// Time Variables
hipEvent_t start, stop;
float time;
hipEventCreate (&start);
hipEventCreate (&stop);
// Input Arrays and variables
int *a = new int [vector_size];
int *b = new int [vector_size];
int *c_cpu = new int [vector_size];
int *c_gpu = new int [vector_size];
// Pointers in GPU memory
int *dev_a;
int *dev_b;
int *dev_c;
// fill the arrays 'a' and 'b' on the CPU
printf("Initializing input arrays.\n");
for (int i = 0; i < vector_size; i++) {
a[i] = rand()%10;
b[i] = rand()%10;
}
//
// CPU Calculation
//////////////////
printf("Running sequential job.\n");
hipEventRecord(start,0);
// Calculate C in the CPU
for (int i = 0; i < vector_size; i++) {
c_cpu[i] = a[i] + b[i];
}
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
printf("\tSequential Job Time: %.2f ms\n", time);
// allocate the memory on the GPU
// HERE
hipMalloc((void**)&dev_a, sizeof(int) * vector_size);
hipMalloc((void**)&dev_b, sizeof(int) * vector_size);
hipMalloc((void**)&dev_c, sizeof(int) * vector_size);
// copy the arrays 'a' and 'b' to the GPU
// HERE
hipMemcpy(dev_a, a, sizeof(int) *vector_size, hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, sizeof(int) *vector_size, hipMemcpyHostToDevice);
//
// GPU Calculation
////////////////////////
printf("Running parallel job.\n");
hipEventRecord(start,0);
// call the kernel
// HERE
dim3 dimGrid(grid_size);
dim3 dimBlock(block_size);
add<<<dimGrid, dimBlock>>>(dev_a, dev_b, dev_c, vector_size);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
printf("\tParallel Job Time: %.2f ms\n", time);
// copy the array 'c' back from the GPU to the CPU
// HERE (there's one more at the end, don't miss it!)
hipMemcpy(c_gpu, dev_c, sizeof(int) * vector_size, hipMemcpyDeviceToHost);
// compare the results
int error = 0;
for (int i = 0; i < vector_size; i++) {
if (c_cpu[i] != c_gpu[i]){
error = 1;
printf( "Error starting element %d, %d != %d\n", i, c_gpu[i], c_cpu[i] );
}
if (error) break;
}
if (error == 0){
printf ("Correct result. No errors were found.\n");
}
// free CPU data
free (a);
free (b);
free (c_cpu);
free (c_gpu);
// free the memory allocated on the GPU
// HERE
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addPiS_S_i
.globl _Z3addPiS_S_i
.p2align 8
.type _Z3addPiS_S_i,@function
_Z3addPiS_S_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addPiS_S_i, .Lfunc_end0-_Z3addPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3addPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
__global__ void add( int *a, int *b, int *c, int vector_size ) {
// Calculate the index in the vector for the thread using the internal variables
int tid = blockIdx.x * blockDim.x + threadIdx.x; // HERE
// This if statement is added in case we have more threads executing
// Than number of elements in the vectors. How can this help?
if (tid < vector_size){
// Compute the addition
// HERE
c[tid] = a[tid] + b[tid];
}
}
int main( int argc, char* argv[] ) {
// Parse Input arguments
// Check the number of arguments
if (argc != 3) {
// Tell the user how to run the program
printf ("Usage: %s vector_size block_size\n", argv[0]);
// "Usage messages" are a conventional way of telling the user
// how to run a program if they enter the command incorrectly.
return 1;
}
// Set GPU Variables based on input arguments
int vector_size = atoi(argv[1]);
int block_size = atoi(argv[2]);
int grid_size = ((vector_size-1)/block_size) + 1;
// Set device that we will use for our cuda code
hipSetDevice(0);
// Time Variables
hipEvent_t start, stop;
float time;
hipEventCreate (&start);
hipEventCreate (&stop);
// Input Arrays and variables
int *a = new int [vector_size];
int *b = new int [vector_size];
int *c_cpu = new int [vector_size];
int *c_gpu = new int [vector_size];
// Pointers in GPU memory
int *dev_a;
int *dev_b;
int *dev_c;
// fill the arrays 'a' and 'b' on the CPU
printf("Initializing input arrays.\n");
for (int i = 0; i < vector_size; i++) {
a[i] = rand()%10;
b[i] = rand()%10;
}
//
// CPU Calculation
//////////////////
printf("Running sequential job.\n");
hipEventRecord(start,0);
// Calculate C in the CPU
for (int i = 0; i < vector_size; i++) {
c_cpu[i] = a[i] + b[i];
}
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
printf("\tSequential Job Time: %.2f ms\n", time);
// allocate the memory on the GPU
// HERE
hipMalloc((void**)&dev_a, sizeof(int) * vector_size);
hipMalloc((void**)&dev_b, sizeof(int) * vector_size);
hipMalloc((void**)&dev_c, sizeof(int) * vector_size);
// copy the arrays 'a' and 'b' to the GPU
// HERE
hipMemcpy(dev_a, a, sizeof(int) *vector_size, hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, sizeof(int) *vector_size, hipMemcpyHostToDevice);
//
// GPU Calculation
////////////////////////
printf("Running parallel job.\n");
hipEventRecord(start,0);
// call the kernel
// HERE
dim3 dimGrid(grid_size);
dim3 dimBlock(block_size);
add<<<dimGrid, dimBlock>>>(dev_a, dev_b, dev_c, vector_size);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
printf("\tParallel Job Time: %.2f ms\n", time);
// copy the array 'c' back from the GPU to the CPU
// HERE (there's one more at the end, don't miss it!)
hipMemcpy(c_gpu, dev_c, sizeof(int) * vector_size, hipMemcpyDeviceToHost);
// compare the results
int error = 0;
for (int i = 0; i < vector_size; i++) {
if (c_cpu[i] != c_gpu[i]){
error = 1;
printf( "Error starting element %d, %d != %d\n", i, c_gpu[i], c_cpu[i] );
}
if (error) break;
}
if (error == 0){
printf ("Correct result. No errors were found.\n");
}
// free CPU data
free (a);
free (b);
free (c_cpu);
free (c_gpu);
// free the memory allocated on the GPU
// HERE
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
} | .text
.file "VectorAdd.hip"
# Host-side launch stub for the HIP kernel `add(int*, int*, int*, int)`
# (mangled _Z3addPiS_S_i). Spills the four kernel arguments to the stack,
# builds the void* argument array, pops the launch configuration previously
# pushed by __hipPushCallConfiguration, and forwards everything to
# hipLaunchKernel. Compiler-generated; not intended for hand editing.
.globl _Z18__device_stub__addPiS_S_i # -- Begin function _Z18__device_stub__addPiS_S_i
.p2align 4, 0x90
.type _Z18__device_stub__addPiS_S_i,@function
_Z18__device_stub__addPiS_S_i: # @_Z18__device_stub__addPiS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
# Spill incoming arguments (three pointers in rdi/rsi/rdx, int in ecx).
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
# Build the kernel-argument pointer array at 80(%rsp) for hipLaunchKernel.
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
# Retrieve grid dim, block dim, shared-mem size and stream into stack slots.
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
# Marshal the popped configuration into the hipLaunchKernel call.
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addPiS_S_i, %edi
# Last two arguments (shared-mem bytes, stream) are passed on the stack.
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z18__device_stub__addPiS_S_i, .Lfunc_end0-_Z18__device_stub__addPiS_S_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $200, %rsp
.cfi_def_cfa_offset 256
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
cmpl $3, %edi
jne .LBB1_1
# %bb.2:
movq 8(%rsi), %rdi
movq %rsi, %rbx
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r14
movq 16(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %rcx
leal -1(%r14), %eax
cltd
movq %rcx, 72(%rsp) # 8-byte Spill
idivl %ecx
movl %eax, 20(%rsp) # 4-byte Spill
xorl %edi, %edi
callq hipSetDevice
leaq 24(%rsp), %rdi
callq hipEventCreate
leaq 8(%rsp), %rdi
callq hipEventCreate
movq %r14, 64(%rsp) # 8-byte Spill
movslq %r14d, %rbx
leaq (,%rbx,4), %rax
testl %ebx, %ebx
movq $-1, %r13
movq %rax, 80(%rsp) # 8-byte Spill
cmovnsq %rax, %r13
movq %r13, %rdi
callq _Znam
movq %rax, %r14
movq %r13, %rdi
callq _Znam
movq %rax, %r15
movq %r13, %rdi
callq _Znam
movq %rax, %r12
movq %r13, %rdi
callq _Znam
movq %rax, %r13
movl $.Lstr, %edi
callq puts@PLT
testl %ebx, %ebx
jle .LBB1_5
# %bb.3: # %.lr.ph.preheader
movl 64(%rsp), %ebx # 4-byte Reload
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB1_4: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $34, %rcx
addl %edx, %ecx
addl %ecx, %ecx
leal (%rcx,%rcx,4), %ecx
subl %ecx, %eax
movl %eax, (%r14,%rbp,4)
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $34, %rcx
addl %edx, %ecx
addl %ecx, %ecx
leal (%rcx,%rcx,4), %ecx
subl %ecx, %eax
movl %eax, (%r15,%rbp,4)
incq %rbp
cmpq %rbp, %rbx
jne .LBB1_4
.LBB1_5: # %._crit_edge
incl 20(%rsp) # 4-byte Folded Spill
movl $.Lstr.1, %edi
callq puts@PLT
movq 24(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 64(%rsp), %rbx # 8-byte Reload
testl %ebx, %ebx
jle .LBB1_8
# %bb.6: # %.lr.ph85.preheader
movl %ebx, %eax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB1_7: # %.lr.ph85
# =>This Inner Loop Header: Depth=1
movl (%r15,%rcx,4), %edx
addl (%r14,%rcx,4), %edx
movl %edx, (%r12,%rcx,4)
incq %rcx
cmpq %rcx, %rax
jne .LBB1_7
.LBB1_8: # %._crit_edge86
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 24(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 4(%rsp), %rdi
callq hipEventElapsedTime
movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.3, %edi
movb $1, %al
callq printf
leaq 48(%rsp), %rdi
movq 80(%rsp), %rbp # 8-byte Reload
movq %rbp, %rsi
callq hipMalloc
leaq 40(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
leaq 32(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
movq 48(%rsp), %rdi
movq %r14, %rsi
movq %rbp, %rdx
movl $1, %ecx
callq hipMemcpy
movq 40(%rsp), %rdi
movq %r15, %rsi
movq %rbp, %rdx
movl $1, %ecx
callq hipMemcpy
movl $.Lstr.2, %edi
callq puts@PLT
movq 24(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movl 20(%rsp), %edi # 4-byte Reload
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %rdi
movl 72(%rsp), %edx # 4-byte Reload
orq %rax, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_10
# %bb.9:
movq 48(%rsp), %rax
movq 40(%rsp), %rcx
movq 32(%rsp), %rdx
movq %rax, 152(%rsp)
movq %rcx, 144(%rsp)
movq %rdx, 136(%rsp)
movl %ebx, 60(%rsp)
leaq 152(%rsp), %rax
movq %rax, 160(%rsp)
leaq 144(%rsp), %rax
movq %rax, 168(%rsp)
leaq 136(%rsp), %rax
movq %rax, 176(%rsp)
leaq 60(%rsp), %rax
movq %rax, 184(%rsp)
leaq 120(%rsp), %rdi
leaq 104(%rsp), %rsi
leaq 96(%rsp), %rdx
leaq 88(%rsp), %rcx
callq __hipPopCallConfiguration
movq 120(%rsp), %rsi
movl 128(%rsp), %edx
movq 104(%rsp), %rcx
movl 112(%rsp), %r8d
leaq 160(%rsp), %r9
movl $_Z3addPiS_S_i, %edi
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
pushq 104(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_10:
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 24(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 4(%rsp), %rdi
callq hipEventElapsedTime
movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.5, %edi
movb $1, %al
callq printf
movq 32(%rsp), %rsi
movq %r13, %rdi
movq %rbp, %rdx
movl $2, %ecx
callq hipMemcpy
testl %ebx, %ebx
jle .LBB1_15
# %bb.11: # %.lr.ph90.preheader
movl %ebx, %eax
xorl %esi, %esi
.p2align 4, 0x90
.LBB1_12: # %.lr.ph90
# =>This Inner Loop Header: Depth=1
movl (%r12,%rsi,4), %ecx
movl (%r13,%rsi,4), %edx
cmpl %edx, %ecx
jne .LBB1_13
# %bb.14: # %.critedge
# in Loop: Header=BB1_12 Depth=1
incq %rsi
cmpq %rsi, %rax
jne .LBB1_12
.LBB1_15: # %.critedge77
movl $.Lstr.3, %edi
callq puts@PLT
jmp .LBB1_16
.LBB1_1:
movq (%rsi), %rsi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movl $1, %eax
jmp .LBB1_17
.LBB1_13:
movl $.L.str.6, %edi
# kill: def $esi killed $esi killed $rsi
xorl %eax, %eax
callq printf
.LBB1_16:
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
movq %r12, %rdi
callq free
movq %r13, %rdi
callq free
movq 48(%rsp), %rdi
callq hipFree
movq 40(%rsp), %rdi
callq hipFree
movq 32(%rsp), %rdi
callq hipFree
xorl %eax, %eax
.LBB1_17:
addq $200, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
# HIP module constructor (runs from .init_array before main).
# Registers the embedded fat binary with the HIP runtime exactly once
# (guarded by the __hip_gpubin_handle null check), registers the kernel
# symbol _Z3addPiS_S_i under its mangled name, and installs
# __hip_module_dtor via atexit for teardown. Compiler-generated.
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
# Skip registration if the fat binary was already registered.
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
# Zero the four trailing pointer arguments passed on the stack.
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3addPiS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
# Tail-call atexit(__hip_module_dtor) to unregister at program exit.
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
# HIP module destructor (registered with atexit by __hip_module_ctor).
# If a fat-binary handle is live, unregisters it with the HIP runtime and
# clears the handle so the teardown is idempotent. Compiler-generated.
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
# Null the handle so a second invocation is a no-op.
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3addPiS_S_i,@object # @_Z3addPiS_S_i
.section .rodata,"a",@progbits
.globl _Z3addPiS_S_i
.p2align 3, 0x0
_Z3addPiS_S_i:
.quad _Z18__device_stub__addPiS_S_i
.size _Z3addPiS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Usage: %s vector_size block_size\n"
.size .L.str, 34
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "\tSequential Job Time: %.2f ms\n"
.size .L.str.3, 31
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "\tParallel Job Time: %.2f ms\n"
.size .L.str.5, 29
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "Error starting element %d, %d != %d\n"
.size .L.str.6, 37
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3addPiS_S_i"
.size .L__unnamed_1, 14
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Initializing input arrays."
.size .Lstr, 27
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Running sequential job."
.size .Lstr.1, 24
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "Running parallel job."
.size .Lstr.2, 22
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "Correct result. No errors were found."
.size .Lstr.3, 38
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__addPiS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3addPiS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z3addPiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*00d0*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */
/* 0x004fca0007ffe0ff */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
; gfx1100 (RDNA3, wave32) device code for add(int*, int*, int*, int):
; each lane computes its global index i = workgroup_id_x * workgroup_size_x
; + local id, and when i < n stores a[i] + b[i] into c[i].
; Kernarg layout (see the amdhsa metadata below): a @0x0, b @0x8, c @0x10,
; n @0x18; 0x2c is the hidden group-size-x field. Compiler-generated.
.protected _Z3addPiS_S_i
.globl _Z3addPiS_S_i
.p2align 8
.type _Z3addPiS_S_i,@function
_Z3addPiS_S_i:
s_clause 0x1
; s2 <- hidden group size x, s3 <- n
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; v1 <- workgroup_id_x * group_size_x + tid (global index i)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
; bounds guard: mask off lanes with i >= n, skip the body if none remain
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
; s[4:5] <- a, s[6:7] <- b, s[0:1] <- c
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; byte offset = sign-extended i << 2 (4-byte ints)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
; load a[i] and b[i]
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
; c[i] = a[i] + b[i]
v_add_nc_u32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addPiS_S_i, .Lfunc_end0-_Z3addPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3addPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000d63ad_00000000-6_VectorAdd.cudafe1.cpp"
.text
#APP
#NO_APP
# atexit teardown helper: unregisters the CUDA fat binary whose handle was
# stored in _ZL20__cudaFatCubinHandle by _ZL24__sti____cudaRegisterAllv.
# Compiler-generated (cudafe); not intended for hand editing.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
# CUDA host-side launch stub for kernel add(int*, int*, int*, int).
# Spills the four arguments, builds the void* argument array, pops the
# launch configuration via __cudaPopCallConfiguration, and (when the pop
# succeeds, i.e. returns 0) invokes cudaLaunchKernel. Protected by a
# stack-canary check. Compiler-generated (cudafe/gcc).
.globl _Z27__device_stub__Z3addPiS_S_iPiS_S_i
.type _Z27__device_stub__Z3addPiS_S_iPiS_S_i, @function
_Z27__device_stub__Z3addPiS_S_iPiS_S_i:
.LFB2082:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
# Spill incoming arguments (three pointers, one int).
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
# Install the stack-protector canary.
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
# Build the kernel-argument pointer array at 96(%rsp).
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
# Default dim3 values (all 1s) for grid and block before the pop.
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
# Verify the canary before returning; diverge to __stack_chk_fail on mismatch.
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
# Pop succeeded: pass stream and shared-mem size on the stack and launch.
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z27__device_stub__Z3addPiS_S_iPiS_S_i, .-_Z27__device_stub__Z3addPiS_S_iPiS_S_i
# Public host-callable entry for kernel add(int*, int*, int*, int).
# Thin wrapper that forwards its register arguments unchanged to the
# device stub above. Compiler-generated.
.globl _Z3addPiS_S_i
.type _Z3addPiS_S_i, @function
_Z3addPiS_S_i:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z3addPiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z3addPiS_S_i, .-_Z3addPiS_S_i
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "Usage: %s vector_size block_size\n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "Initializing input arrays.\n"
.LC2:
.string "Running sequential job.\n"
.section .rodata.str1.8
.align 8
.LC3:
.string "\tSequential Job Time: %.2f ms\n"
.section .rodata.str1.1
.LC4:
.string "Running parallel job.\n"
.LC5:
.string "\tParallel Job Time: %.2f ms\n"
.section .rodata.str1.8
.align 8
.LC6:
.string "Error starting element %d, %d != %d\n"
.align 8
.LC7:
.string "Correct result. No errors were found.\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $120, %rsp
.cfi_def_cfa_offset 176
movq %rsi, %rbx
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
cmpl $3, %edi
je .L12
movq (%rsi), %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
movl $1, %eax
.L11:
movq 104(%rsp), %rdx
subq %fs:40, %rdx
jne .L32
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L12:
.cfi_restore_state
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r15
movq %rax, 8(%rsp)
movl %eax, 28(%rsp)
movq 16(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %rsi
movq %rax, 16(%rsp)
movl %r15d, %eax
subl $1, %eax
cltd
idivl %esi
addl $1, %eax
movl %eax, 24(%rsp)
movl $0, %edi
call cudaSetDevice@PLT
leaq 40(%rsp), %rdi
call cudaEventCreate@PLT
leaq 48(%rsp), %rdi
call cudaEventCreate@PLT
movslq %r15d, %rax
movabsq $2305843009213693950, %rdx
cmpq %rax, %rdx
jb .L14
leaq 0(,%rax,4), %rbx
movq %rbx, (%rsp)
movq %rbx, %rdi
call _Znam@PLT
movq %rax, %r12
movq %rbx, %rdi
call _Znam@PLT
movq %rax, %r13
movq %rbx, %rdi
call _Znam@PLT
movq %rax, %rbp
movq %rbx, %rdi
call _Znam@PLT
movq %rax, %r14
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
testl %r15d, %r15d
jle .L33
movq 8(%rsp), %rax
leal -1(%rax), %r15d
movl $0, %ebx
.L18:
call rand@PLT
movslq %eax, %rdx
imulq $1717986919, %rdx, %rdx
sarq $34, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,4), %edx
addl %edx, %edx
subl %edx, %eax
movl %eax, (%r12,%rbx,4)
call rand@PLT
movslq %eax, %rdx
imulq $1717986919, %rdx, %rdx
sarq $34, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,4), %edx
addl %edx, %edx
subl %edx, %eax
movl %eax, 0(%r13,%rbx,4)
movq %rbx, %rax
addq $1, %rbx
cmpq %r15, %rax
jne .L18
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movl $0, %eax
.L19:
movl 0(%r13,%rax,4), %edx
addl (%r12,%rax,4), %edx
movl %edx, 0(%rbp,%rax,4)
movq %rax, %rdx
addq $1, %rax
cmpq %r15, %rdx
jne .L19
.L25:
movl $0, %esi
movq 48(%rsp), %rdi
call cudaEventRecord@PLT
movq 48(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 36(%rsp), %rdi
movq 48(%rsp), %rdx
movq 40(%rsp), %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 36(%rsp), %xmm0
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
leaq 56(%rsp), %rdi
movq (%rsp), %rbx
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 64(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 72(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r12, %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r13, %rsi
movq 64(%rsp), %rdi
call cudaMemcpy@PLT
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movl 24(%rsp), %eax
movl %eax, 80(%rsp)
movl $1, 84(%rsp)
movl 16(%rsp), %eax
movl %eax, 92(%rsp)
movl $1, 96(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 92(%rsp), %rdx
movl $1, %ecx
movq 80(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L34
.L20:
movl $0, %esi
movq 48(%rsp), %rdi
call cudaEventRecord@PLT
movq 48(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 36(%rsp), %rdi
movq 48(%rsp), %rdx
movq 40(%rsp), %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 36(%rsp), %xmm0
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $2, %ecx
movq (%rsp), %rdx
movq 72(%rsp), %rsi
movq %r14, %rdi
call cudaMemcpy@PLT
movq 8(%rsp), %rax
testl %eax, %eax
jle .L21
leal -1(%rax), %esi
movl $0, %edx
jmp .L24
.L14:
movq 104(%rsp), %rax
subq %fs:40, %rax
je .L17
call __stack_chk_fail@PLT
.L17:
call __cxa_throw_bad_array_new_length@PLT
.L34:
movl 28(%rsp), %ecx
movq 72(%rsp), %rdx
movq 64(%rsp), %rsi
movq 56(%rsp), %rdi
call _Z27__device_stub__Z3addPiS_S_iPiS_S_i
jmp .L20
.L27:
movq %rax, %rdx
.L24:
movl 0(%rbp,%rdx,4), %r8d
movl (%r14,%rdx,4), %ecx
cmpl %ecx, %r8d
jne .L35
leaq 1(%rdx), %rax
cmpq %rsi, %rdx
jne .L27
.L21:
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L23
.L35:
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L23:
movq %r12, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq %r14, %rdi
call free@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq 64(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rdi
call cudaFree@PLT
movl $0, %eax
jmp .L11
.L33:
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
jmp .L25
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC8:
.string "_Z3addPiS_S_i"
.text
# CUDA static-init registration routine (runs from .init_array).
# Registers the embedded fat binary, stores its handle in
# _ZL20__cudaFatCubinHandle, registers kernel _Z3addPiS_S_i under its
# mangled name (.LC8), finalizes registration, and installs the
# unregister helper via atexit. Compiler-generated (cudafe).
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
# Four trailing null pointer arguments are passed on the stack.
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addPiS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
# Ensure the fat binary is unregistered at program exit.
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "VectorAdd.hip"
# HIP host-side launch stub for kernel add(int*, int*, int*, int)
# (duplicate of the stub earlier in this chunk): spills arguments, builds
# the void* argument array, pops the launch configuration, and calls
# hipLaunchKernel. Compiler-generated; not intended for hand editing.
.globl _Z18__device_stub__addPiS_S_i # -- Begin function _Z18__device_stub__addPiS_S_i
.p2align 4, 0x90
.type _Z18__device_stub__addPiS_S_i,@function
_Z18__device_stub__addPiS_S_i: # @_Z18__device_stub__addPiS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
# Spill incoming arguments (three pointers in rdi/rsi/rdx, int in ecx).
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
# Build the kernel-argument pointer array at 80(%rsp).
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
# Pop grid dim, block dim, shared-mem size and stream.
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addPiS_S_i, %edi
# Last two arguments (shared-mem bytes, stream) go on the stack.
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z18__device_stub__addPiS_S_i, .Lfunc_end0-_Z18__device_stub__addPiS_S_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $200, %rsp
.cfi_def_cfa_offset 256
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
cmpl $3, %edi
jne .LBB1_1
# %bb.2:
movq 8(%rsi), %rdi
movq %rsi, %rbx
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r14
movq 16(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %rcx
leal -1(%r14), %eax
cltd
movq %rcx, 72(%rsp) # 8-byte Spill
idivl %ecx
movl %eax, 20(%rsp) # 4-byte Spill
xorl %edi, %edi
callq hipSetDevice
leaq 24(%rsp), %rdi
callq hipEventCreate
leaq 8(%rsp), %rdi
callq hipEventCreate
movq %r14, 64(%rsp) # 8-byte Spill
movslq %r14d, %rbx
leaq (,%rbx,4), %rax
testl %ebx, %ebx
movq $-1, %r13
movq %rax, 80(%rsp) # 8-byte Spill
cmovnsq %rax, %r13
movq %r13, %rdi
callq _Znam
movq %rax, %r14
movq %r13, %rdi
callq _Znam
movq %rax, %r15
movq %r13, %rdi
callq _Znam
movq %rax, %r12
movq %r13, %rdi
callq _Znam
movq %rax, %r13
movl $.Lstr, %edi
callq puts@PLT
testl %ebx, %ebx
jle .LBB1_5
# %bb.3: # %.lr.ph.preheader
movl 64(%rsp), %ebx # 4-byte Reload
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB1_4: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $34, %rcx
addl %edx, %ecx
addl %ecx, %ecx
leal (%rcx,%rcx,4), %ecx
subl %ecx, %eax
movl %eax, (%r14,%rbp,4)
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $34, %rcx
addl %edx, %ecx
addl %ecx, %ecx
leal (%rcx,%rcx,4), %ecx
subl %ecx, %eax
movl %eax, (%r15,%rbp,4)
incq %rbp
cmpq %rbp, %rbx
jne .LBB1_4
.LBB1_5: # %._crit_edge
incl 20(%rsp) # 4-byte Folded Spill
movl $.Lstr.1, %edi
callq puts@PLT
movq 24(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 64(%rsp), %rbx # 8-byte Reload
testl %ebx, %ebx
jle .LBB1_8
# %bb.6: # %.lr.ph85.preheader
movl %ebx, %eax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB1_7: # %.lr.ph85
# =>This Inner Loop Header: Depth=1
movl (%r15,%rcx,4), %edx
addl (%r14,%rcx,4), %edx
movl %edx, (%r12,%rcx,4)
incq %rcx
cmpq %rcx, %rax
jne .LBB1_7
.LBB1_8: # %._crit_edge86
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 24(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 4(%rsp), %rdi
callq hipEventElapsedTime
movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.3, %edi
movb $1, %al
callq printf
leaq 48(%rsp), %rdi
movq 80(%rsp), %rbp # 8-byte Reload
movq %rbp, %rsi
callq hipMalloc
leaq 40(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
leaq 32(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
movq 48(%rsp), %rdi
movq %r14, %rsi
movq %rbp, %rdx
movl $1, %ecx
callq hipMemcpy
movq 40(%rsp), %rdi
movq %r15, %rsi
movq %rbp, %rdx
movl $1, %ecx
callq hipMemcpy
movl $.Lstr.2, %edi
callq puts@PLT
movq 24(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movl 20(%rsp), %edi # 4-byte Reload
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %rdi
movl 72(%rsp), %edx # 4-byte Reload
orq %rax, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_10
# %bb.9:
movq 48(%rsp), %rax
movq 40(%rsp), %rcx
movq 32(%rsp), %rdx
movq %rax, 152(%rsp)
movq %rcx, 144(%rsp)
movq %rdx, 136(%rsp)
movl %ebx, 60(%rsp)
leaq 152(%rsp), %rax
movq %rax, 160(%rsp)
leaq 144(%rsp), %rax
movq %rax, 168(%rsp)
leaq 136(%rsp), %rax
movq %rax, 176(%rsp)
leaq 60(%rsp), %rax
movq %rax, 184(%rsp)
leaq 120(%rsp), %rdi
leaq 104(%rsp), %rsi
leaq 96(%rsp), %rdx
leaq 88(%rsp), %rcx
callq __hipPopCallConfiguration
movq 120(%rsp), %rsi
movl 128(%rsp), %edx
movq 104(%rsp), %rcx
movl 112(%rsp), %r8d
leaq 160(%rsp), %r9
movl $_Z3addPiS_S_i, %edi
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
pushq 104(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_10:
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 24(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 4(%rsp), %rdi
callq hipEventElapsedTime
movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.5, %edi
movb $1, %al
callq printf
movq 32(%rsp), %rsi
movq %r13, %rdi
movq %rbp, %rdx
movl $2, %ecx
callq hipMemcpy
testl %ebx, %ebx
jle .LBB1_15
# %bb.11: # %.lr.ph90.preheader
movl %ebx, %eax
xorl %esi, %esi
.p2align 4, 0x90
.LBB1_12: # %.lr.ph90
# =>This Inner Loop Header: Depth=1
movl (%r12,%rsi,4), %ecx
movl (%r13,%rsi,4), %edx
cmpl %edx, %ecx
jne .LBB1_13
# %bb.14: # %.critedge
# in Loop: Header=BB1_12 Depth=1
incq %rsi
cmpq %rsi, %rax
jne .LBB1_12
.LBB1_15: # %.critedge77
movl $.Lstr.3, %edi
callq puts@PLT
jmp .LBB1_16
.LBB1_1:
movq (%rsi), %rsi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movl $1, %eax
jmp .LBB1_17
.LBB1_13:
movl $.L.str.6, %edi
# kill: def $esi killed $esi killed $rsi
xorl %eax, %eax
callq printf
.LBB1_16:
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
movq %r12, %rdi
callq free
movq %r13, %rdi
callq free
movq 48(%rsp), %rdi
callq hipFree
movq 40(%rsp), %rdi
callq hipFree
movq 32(%rsp), %rdi
callq hipFree
xorl %eax, %eax
.LBB1_17:
addq $200, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3addPiS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3addPiS_S_i,@object # @_Z3addPiS_S_i
.section .rodata,"a",@progbits
.globl _Z3addPiS_S_i
.p2align 3, 0x0
_Z3addPiS_S_i:
.quad _Z18__device_stub__addPiS_S_i
.size _Z3addPiS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Usage: %s vector_size block_size\n"
.size .L.str, 34
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "\tSequential Job Time: %.2f ms\n"
.size .L.str.3, 31
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "\tParallel Job Time: %.2f ms\n"
.size .L.str.5, 29
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "Error starting element %d, %d != %d\n"
.size .L.str.6, 37
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3addPiS_S_i"
.size .L__unnamed_1, 14
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Initializing input arrays."
.size .Lstr, 27
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Running sequential job."
.size .Lstr.1, 24
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "Running parallel job."
.size .Lstr.2, 22
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "Correct result. No errors were found."
.size .Lstr.3, 38
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__addPiS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3addPiS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include <time.h>
#include<cuda.h>
//kernel
// Element-wise vector add: C[i] = A[i] + B[i] for i in [0, N).
// Launch as a 1-D grid of 1-D blocks with at least N total threads;
// threads past the end of the arrays exit without touching memory.
__global__
void vecAddKernel(float *A, float *B, float *C, unsigned int N){
    const unsigned int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= N) {
        return;  // guard the grid tail when N % blockDim.x != 0
    }
    C[gid] = A[gid] + B[gid];
}
/*
 * Demo driver: builds two 100M-element vectors, adds them on the GPU and
 * on the CPU, reports clock() tick timings for both (CPU time, not wall
 * time), and cross-checks the results element by element.
 *
 * Fixes over the previous revision:
 *  - cudaDeviceSynchronize() after the launch, so the GPU timing covers
 *    the kernel execution instead of just the asynchronous launch call.
 *  - "Correct!!" is now gated on the verification flag instead of
 *    printing unconditionally after a mismatch.
 *  - Host malloc results are checked (400 MB each can plausibly fail).
 */
int main(){
    unsigned int N = 100000000;
    float *A_h, *B_h, *C_h;
    float *A_d, *B_d, *C_d;
    // Allocate and initialize host inputs.
    A_h = (float *)malloc(N * sizeof(float));
    B_h = (float *)malloc(N * sizeof(float));
    C_h = (float *)malloc(N * sizeof(float));
    if (A_h == NULL || B_h == NULL || C_h == NULL) {
        fprintf(stderr, "Host allocation failed\n");
        return 1;
    }
    for (unsigned int i = 0; i < N; i++){
        A_h[i] = 1.0f;
        B_h[i] = 2.0f;
    }
    // Allocate device memory and copy the inputs over.
    cudaMalloc(&A_d, N*sizeof(float));
    cudaMalloc(&B_d, N*sizeof(float));
    cudaMalloc(&C_d, N*sizeof(float));
    cudaMemcpy(A_d, A_h, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(B_d, B_h, N*sizeof(float), cudaMemcpyHostToDevice);
    unsigned int blockSize = 1024;
    // Integer ceil-division; no need for the double round-trip through ceil().
    unsigned int gridSize = (N + blockSize - 1) / blockSize;
    clock_t start = clock();
    vecAddKernel<<< gridSize, blockSize >>>(A_d, B_d, C_d, N);
    // The launch is asynchronous: without this sync, 'end' would be taken
    // while the kernel is still running and the GPU time would be bogus.
    cudaDeviceSynchronize();
    clock_t end = clock();
    clock_t gpu_time = end - start;
    printf("GPU Computation over, time: %f \n", (double)gpu_time);
    cudaMemcpy(C_h, C_d, N*sizeof(float), cudaMemcpyDeviceToHost);
    // CPU reference computation, timed the same way (clock() ticks).
    float *C_ht;
    C_ht = (float *)malloc(N * sizeof(float));
    if (C_ht == NULL) {
        fprintf(stderr, "Host allocation failed\n");
        return 1;
    }
    start = clock();
    for(unsigned int i = 0; i < N; i++){
        C_ht[i] = A_h[i] + B_h[i];
    }
    end = clock();
    clock_t cpu_time = end - start;
    printf("CPU Computation over, time: %f \n", (double)cpu_time);
    printf("GPU is %f times faster\n", (double)cpu_time/gpu_time);
    // Verify the GPU result against the CPU reference. Exact float
    // equality is fine here: both sides compute 1.0f + 2.0f.
    bool valid = true;
    for(unsigned int i = 0; i < N; i++){
        if(C_h[i] != C_ht[i]){
            printf("Wrong! %f vs %f\n", C_h[i], C_ht[i]);
            valid = false;
            break;
        }
    }
    // Bug fix: the success message used to print even after "Wrong!".
    if (valid) {
        printf("Correct!!\n");
    }
    cudaFree(A_d);
    cudaFree(B_d);
    cudaFree(C_d);
    free(A_h);
    free(B_h);
    free(C_h);
    free(C_ht);
    return 0;
}
Function : _Z12vecAddKernelPfS_S_j
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.U32.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */
/* 0x000fda0003f06070 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE.U32 R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0007 */
/*0090*/ IMAD.WIDE.U32 R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x0c0fe400078e0007 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE.U32 R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0007 */
/*00d0*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */
/* 0x004fca0000000000 */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include <time.h>
#include<cuda.h>
//kernel
// Element-wise vector add: C[i] = A[i] + B[i] for i in [0, N).
// Launch as a 1-D grid of 1-D blocks with at least N total threads;
// threads past the end of the arrays exit without touching memory.
__global__
void vecAddKernel(float *A, float *B, float *C, unsigned int N){
    const unsigned int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= N) {
        return;  // guard the grid tail when N % blockDim.x != 0
    }
    C[gid] = A[gid] + B[gid];
}
/*
 * Demo driver: builds two 100M-element vectors, adds them on the GPU and
 * on the CPU, reports clock() tick timings for both (CPU time, not wall
 * time), and cross-checks the results element by element.
 *
 * Fixes over the previous revision:
 *  - cudaDeviceSynchronize() after the launch, so the GPU timing covers
 *    the kernel execution instead of just the asynchronous launch call.
 *  - "Correct!!" is now gated on the verification flag instead of
 *    printing unconditionally after a mismatch.
 *  - Host malloc results are checked (400 MB each can plausibly fail).
 */
int main(){
    unsigned int N = 100000000;
    float *A_h, *B_h, *C_h;
    float *A_d, *B_d, *C_d;
    // Allocate and initialize host inputs.
    A_h = (float *)malloc(N * sizeof(float));
    B_h = (float *)malloc(N * sizeof(float));
    C_h = (float *)malloc(N * sizeof(float));
    if (A_h == NULL || B_h == NULL || C_h == NULL) {
        fprintf(stderr, "Host allocation failed\n");
        return 1;
    }
    for (unsigned int i = 0; i < N; i++){
        A_h[i] = 1.0f;
        B_h[i] = 2.0f;
    }
    // Allocate device memory and copy the inputs over.
    cudaMalloc(&A_d, N*sizeof(float));
    cudaMalloc(&B_d, N*sizeof(float));
    cudaMalloc(&C_d, N*sizeof(float));
    cudaMemcpy(A_d, A_h, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(B_d, B_h, N*sizeof(float), cudaMemcpyHostToDevice);
    unsigned int blockSize = 1024;
    // Integer ceil-division; no need for the double round-trip through ceil().
    unsigned int gridSize = (N + blockSize - 1) / blockSize;
    clock_t start = clock();
    vecAddKernel<<< gridSize, blockSize >>>(A_d, B_d, C_d, N);
    // The launch is asynchronous: without this sync, 'end' would be taken
    // while the kernel is still running and the GPU time would be bogus.
    cudaDeviceSynchronize();
    clock_t end = clock();
    clock_t gpu_time = end - start;
    printf("GPU Computation over, time: %f \n", (double)gpu_time);
    cudaMemcpy(C_h, C_d, N*sizeof(float), cudaMemcpyDeviceToHost);
    // CPU reference computation, timed the same way (clock() ticks).
    float *C_ht;
    C_ht = (float *)malloc(N * sizeof(float));
    if (C_ht == NULL) {
        fprintf(stderr, "Host allocation failed\n");
        return 1;
    }
    start = clock();
    for(unsigned int i = 0; i < N; i++){
        C_ht[i] = A_h[i] + B_h[i];
    }
    end = clock();
    clock_t cpu_time = end - start;
    printf("CPU Computation over, time: %f \n", (double)cpu_time);
    printf("GPU is %f times faster\n", (double)cpu_time/gpu_time);
    // Verify the GPU result against the CPU reference. Exact float
    // equality is fine here: both sides compute 1.0f + 2.0f.
    bool valid = true;
    for(unsigned int i = 0; i < N; i++){
        if(C_h[i] != C_ht[i]){
            printf("Wrong! %f vs %f\n", C_h[i], C_ht[i]);
            valid = false;
            break;
        }
    }
    // Bug fix: the success message used to print even after "Wrong!".
    if (valid) {
        printf("Correct!!\n");
    }
    cudaFree(A_d);
    cudaFree(B_d);
    cudaFree(C_d);
    free(A_h);
    free(B_h);
    free(C_h);
    free(C_ht);
    return 0;
}
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z37__device_stub__Z12vecAddKernelPfS_S_jPfS_S_j
.type _Z37__device_stub__Z12vecAddKernelPfS_S_jPfS_S_j, @function
_Z37__device_stub__Z12vecAddKernelPfS_S_jPfS_S_j:
.LFB2082:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z12vecAddKernelPfS_S_j(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z37__device_stub__Z12vecAddKernelPfS_S_jPfS_S_j, .-_Z37__device_stub__Z12vecAddKernelPfS_S_jPfS_S_j
.globl _Z12vecAddKernelPfS_S_j
.type _Z12vecAddKernelPfS_S_j, @function
_Z12vecAddKernelPfS_S_j:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z12vecAddKernelPfS_S_jPfS_S_j
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z12vecAddKernelPfS_S_j, .-_Z12vecAddKernelPfS_S_j
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "GPU Computation over, time: %f \n"
.align 8
.LC3:
.string "CPU Computation over, time: %f \n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC4:
.string "GPU is %f times faster\n"
.LC5:
.string "Wrong! %f vs %f\n"
.LC6:
.string "Correct!!\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $80, %rsp
.cfi_def_cfa_offset 128
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $400000000, %edi
call malloc@PLT
movq %rax, %rbp
movl $400000000, %edi
call malloc@PLT
movq %rax, %rbx
movl $400000000, %edi
call malloc@PLT
movq %rax, %r13
movl $0, %eax
movss .LC0(%rip), %xmm1
movss .LC1(%rip), %xmm0
.L12:
movss %xmm1, 0(%rbp,%rax)
movss %xmm0, (%rbx,%rax)
addq $4, %rax
cmpq $400000000, %rax
jne .L12
leaq 24(%rsp), %rdi
movl $400000000, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $400000000, %esi
call cudaMalloc@PLT
leaq 40(%rsp), %rdi
movl $400000000, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $400000000, %edx
movq %rbp, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $400000000, %edx
movq %rbx, %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
call clock@PLT
movq %rax, %r12
movl $1024, 60(%rsp)
movl $1, 64(%rsp)
movl $97657, 48(%rsp)
movl $1, 52(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L25
.L13:
call clock@PLT
subq %r12, %rax
pxor %xmm2, %xmm2
cvtsi2sdq %rax, %xmm2
movsd %xmm2, 8(%rsp)
movapd %xmm2, %xmm0
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $2, %ecx
movl $400000000, %edx
movq 40(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movl $400000000, %edi
call malloc@PLT
movq %rax, %r12
call clock@PLT
movq %rax, %r14
movl $0, %eax
.L14:
movss 0(%rbp,%rax), %xmm0
addss (%rbx,%rax), %xmm0
movss %xmm0, (%r12,%rax)
addq $4, %rax
cmpq $400000000, %rax
jne .L14
call clock@PLT
subq %r14, %rax
pxor %xmm3, %xmm3
cvtsi2sdq %rax, %xmm3
movq %xmm3, %r14
movapd %xmm3, %xmm0
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq %r14, %xmm0
divsd 8(%rsp), %xmm0
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $0, %eax
.L18:
movss 0(%r13,%rax), %xmm0
movss (%r12,%rax), %xmm1
ucomiss %xmm1, %xmm0
jp .L20
jne .L20
addq $4, %rax
cmpq $400000000, %rax
jne .L18
jmp .L17
.L25:
movl $100000000, %ecx
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z37__device_stub__Z12vecAddKernelPfS_S_jPfS_S_j
jmp .L13
.L20:
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
.L17:
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L26
movl $0, %eax
addq $80, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L26:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC7:
.string "_Z12vecAddKernelPfS_S_j"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _Z12vecAddKernelPfS_S_j(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1065353216
.align 4
.LC1:
.long 1073741824
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include <time.h>
#include<cuda.h>
//kernel
__global__
void vecAddKernel(float *A, float *B, float *C, unsigned int N){
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < N){
C[idx] = A[idx] + B[idx];
}
}
int main(){
unsigned int N = 100000000;
float *A_h, *B_h,*C_h;
float *A_d, *B_d,*C_d;
// Allocate host memory
A_h = (float *)malloc(N * sizeof(float));
B_h = (float *)malloc(N * sizeof(float));
C_h = (float *)malloc(N * sizeof(float));
for (unsigned int i = 0; i<N; i++){
A_h[i] = 1.0f;
B_h[i] = 2.0f;
}
// Allocate device memory
cudaMalloc(&A_d, N*sizeof(float));
cudaMalloc(&B_d, N*sizeof(float));
cudaMalloc(&C_d, N*sizeof(float));
//memory copy from host to device
cudaMemcpy(A_d, A_h, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(B_d, B_h, N*sizeof(float), cudaMemcpyHostToDevice);
unsigned int blockSize = 1024;
unsigned int gridSize = (unsigned int)ceil((double)N/blockSize);
//dim3 gridDim =
clock_t start = clock();
vecAddKernel<<< gridSize, blockSize >>>(A_d, B_d, C_d, N);
clock_t end = clock();
clock_t gpu_time = end - start;
printf("GPU Computation over, time: %f \n", (double)gpu_time);
cudaMemcpy(C_h, C_d, N*sizeof(float), cudaMemcpyDeviceToHost);
float *C_ht;
C_ht = (float *)malloc(N * sizeof(float));
start = clock();
for(unsigned int i = 0; i < N; i++){
C_ht[i] = A_h[i] + B_h[i];
}
end = clock();
clock_t cpu_time = end - start;
printf("CPU Computation over, time: %f \n", (double)cpu_time);
printf("GPU is %f times faster\n", (double)cpu_time/gpu_time);
bool valid = true;
for(unsigned int i = 0; i < N; i++){
if(C_h[i] != C_ht[i]){
printf("Wrong! %f vs %f\n", C_h[i], C_ht[i]);
valid = false;
break;
}
}
printf("Correct!!\n");
cudaFree(A_d);
cudaFree(B_d);
cudaFree(C_d);
free(A_h);
free(B_h);
free(C_h);
free(C_ht);
return 0;
} | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include <time.h>
#include<hip/hip_runtime.h>
//kernel
// Element-wise vector add: C[i] = A[i] + B[i] for i in [0, N).
// Launch as a 1-D grid of 1-D blocks with at least N total threads;
// threads past the end of the arrays exit without touching memory.
__global__
void vecAddKernel(float *A, float *B, float *C, unsigned int N){
    const unsigned int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= N) {
        return;  // guard the grid tail when N % blockDim.x != 0
    }
    C[gid] = A[gid] + B[gid];
}
int main(){
unsigned int N = 100000000;
float *A_h, *B_h,*C_h;
float *A_d, *B_d,*C_d;
// Allocate host memory
A_h = (float *)malloc(N * sizeof(float));
B_h = (float *)malloc(N * sizeof(float));
C_h = (float *)malloc(N * sizeof(float));
for (unsigned int i = 0; i<N; i++){
A_h[i] = 1.0f;
B_h[i] = 2.0f;
}
// Allocate device memory
hipMalloc(&A_d, N*sizeof(float));
hipMalloc(&B_d, N*sizeof(float));
hipMalloc(&C_d, N*sizeof(float));
//memory copy from host to device
hipMemcpy(A_d, A_h, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(B_d, B_h, N*sizeof(float), hipMemcpyHostToDevice);
unsigned int blockSize = 1024;
unsigned int gridSize = (unsigned int)ceil((double)N/blockSize);
//dim3 gridDim =
clock_t start = clock();
vecAddKernel<<< gridSize, blockSize >>>(A_d, B_d, C_d, N);
clock_t end = clock();
clock_t gpu_time = end - start;
printf("GPU Computation over, time: %f \n", (double)gpu_time);
hipMemcpy(C_h, C_d, N*sizeof(float), hipMemcpyDeviceToHost);
float *C_ht;
C_ht = (float *)malloc(N * sizeof(float));
start = clock();
for(unsigned int i = 0; i < N; i++){
C_ht[i] = A_h[i] + B_h[i];
}
end = clock();
clock_t cpu_time = end - start;
printf("CPU Computation over, time: %f \n", (double)cpu_time);
printf("GPU is %f times faster\n", (double)cpu_time/gpu_time);
bool valid = true;
for(unsigned int i = 0; i < N; i++){
if(C_h[i] != C_ht[i]){
printf("Wrong! %f vs %f\n", C_h[i], C_ht[i]);
valid = false;
break;
}
}
printf("Correct!!\n");
hipFree(A_d);
hipFree(B_d);
hipFree(C_d);
free(A_h);
free(B_h);
free(C_h);
free(C_ht);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include <time.h>
#include<hip/hip_runtime.h>
//kernel
__global__
void vecAddKernel(float *A, float *B, float *C, unsigned int N){
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < N){
C[idx] = A[idx] + B[idx];
}
}
int main(){
unsigned int N = 100000000;
float *A_h, *B_h,*C_h;
float *A_d, *B_d,*C_d;
// Allocate host memory
A_h = (float *)malloc(N * sizeof(float));
B_h = (float *)malloc(N * sizeof(float));
C_h = (float *)malloc(N * sizeof(float));
for (unsigned int i = 0; i<N; i++){
A_h[i] = 1.0f;
B_h[i] = 2.0f;
}
// Allocate device memory
hipMalloc(&A_d, N*sizeof(float));
hipMalloc(&B_d, N*sizeof(float));
hipMalloc(&C_d, N*sizeof(float));
//memory copy from host to device
hipMemcpy(A_d, A_h, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(B_d, B_h, N*sizeof(float), hipMemcpyHostToDevice);
unsigned int blockSize = 1024;
unsigned int gridSize = (unsigned int)ceil((double)N/blockSize);
//dim3 gridDim =
clock_t start = clock();
vecAddKernel<<< gridSize, blockSize >>>(A_d, B_d, C_d, N);
clock_t end = clock();
clock_t gpu_time = end - start;
printf("GPU Computation over, time: %f \n", (double)gpu_time);
hipMemcpy(C_h, C_d, N*sizeof(float), hipMemcpyDeviceToHost);
float *C_ht;
C_ht = (float *)malloc(N * sizeof(float));
start = clock();
for(unsigned int i = 0; i < N; i++){
C_ht[i] = A_h[i] + B_h[i];
}
end = clock();
clock_t cpu_time = end - start;
printf("CPU Computation over, time: %f \n", (double)cpu_time);
printf("GPU is %f times faster\n", (double)cpu_time/gpu_time);
bool valid = true;
for(unsigned int i = 0; i < N; i++){
if(C_h[i] != C_ht[i]){
printf("Wrong! %f vs %f\n", C_h[i], C_ht[i]);
valid = false;
break;
}
}
printf("Correct!!\n");
hipFree(A_d);
hipFree(B_d);
hipFree(C_d);
free(A_h);
free(B_h);
free(C_h);
free(C_ht);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z12vecAddKernelPfS_S_j
.globl _Z12vecAddKernelPfS_S_j
.p2align 8
.type _Z12vecAddKernelPfS_S_j,@function
_Z12vecAddKernelPfS_S_j:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_u32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_mov_b32_e32 v2, 0
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12vecAddKernelPfS_S_j
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z12vecAddKernelPfS_S_j, .Lfunc_end0-_Z12vecAddKernelPfS_S_j
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12vecAddKernelPfS_S_j
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z12vecAddKernelPfS_S_j.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include <time.h>
#include<hip/hip_runtime.h>
//kernel
// Element-wise vector add: C[i] = A[i] + B[i] for i in [0, N).
// Launch as a 1-D grid of 1-D blocks with at least N total threads;
// threads past the end of the arrays exit without touching memory.
__global__
void vecAddKernel(float *A, float *B, float *C, unsigned int N){
    const unsigned int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= N) {
        return;  // guard the grid tail when N % blockDim.x != 0
    }
    C[gid] = A[gid] + B[gid];
}
int main(){
unsigned int N = 100000000;
float *A_h, *B_h,*C_h;
float *A_d, *B_d,*C_d;
// Allocate host memory
A_h = (float *)malloc(N * sizeof(float));
B_h = (float *)malloc(N * sizeof(float));
C_h = (float *)malloc(N * sizeof(float));
for (unsigned int i = 0; i<N; i++){
A_h[i] = 1.0f;
B_h[i] = 2.0f;
}
// Allocate device memory
hipMalloc(&A_d, N*sizeof(float));
hipMalloc(&B_d, N*sizeof(float));
hipMalloc(&C_d, N*sizeof(float));
//memory copy from host to device
hipMemcpy(A_d, A_h, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(B_d, B_h, N*sizeof(float), hipMemcpyHostToDevice);
unsigned int blockSize = 1024;
unsigned int gridSize = (unsigned int)ceil((double)N/blockSize);
//dim3 gridDim =
clock_t start = clock();
vecAddKernel<<< gridSize, blockSize >>>(A_d, B_d, C_d, N);
clock_t end = clock();
clock_t gpu_time = end - start;
printf("GPU Computation over, time: %f \n", (double)gpu_time);
hipMemcpy(C_h, C_d, N*sizeof(float), hipMemcpyDeviceToHost);
float *C_ht;
C_ht = (float *)malloc(N * sizeof(float));
start = clock();
for(unsigned int i = 0; i < N; i++){
C_ht[i] = A_h[i] + B_h[i];
}
end = clock();
clock_t cpu_time = end - start;
printf("CPU Computation over, time: %f \n", (double)cpu_time);
printf("GPU is %f times faster\n", (double)cpu_time/gpu_time);
bool valid = true;
for(unsigned int i = 0; i < N; i++){
if(C_h[i] != C_ht[i]){
printf("Wrong! %f vs %f\n", C_h[i], C_ht[i]);
valid = false;
break;
}
}
printf("Correct!!\n");
hipFree(A_d);
hipFree(B_d);
hipFree(C_d);
free(A_h);
free(B_h);
free(C_h);
free(C_ht);
return 0;
} | .text
.file "ex1.hip"
.globl _Z27__device_stub__vecAddKernelPfS_S_j # -- Begin function _Z27__device_stub__vecAddKernelPfS_S_j
.p2align 4, 0x90
.type _Z27__device_stub__vecAddKernelPfS_S_j,@function
_Z27__device_stub__vecAddKernelPfS_S_j: # @_Z27__device_stub__vecAddKernelPfS_S_j
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12vecAddKernelPfS_S_j, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z27__device_stub__vecAddKernelPfS_S_j, .Lfunc_end0-_Z27__device_stub__vecAddKernelPfS_S_j
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $168, %rsp
.cfi_def_cfa_offset 224
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $400000000, %edi # imm = 0x17D78400
callq malloc
movq %rax, %rbx
movl $400000000, %edi # imm = 0x17D78400
callq malloc
movq %rax, %r14
movl $400000000, %edi # imm = 0x17D78400
callq malloc
movq %rax, %r15
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl $1065353216, (%rbx,%rax,4) # imm = 0x3F800000
movl $1073741824, (%r14,%rax,4) # imm = 0x40000000
incq %rax
cmpq $100000000, %rax # imm = 0x5F5E100
jne .LBB1_1
# %bb.2:
leaq 24(%rsp), %rdi
movl $400000000, %esi # imm = 0x17D78400
callq hipMalloc
leaq 16(%rsp), %rdi
movl $400000000, %esi # imm = 0x17D78400
callq hipMalloc
leaq 8(%rsp), %rdi
movl $400000000, %esi # imm = 0x17D78400
callq hipMalloc
movq 24(%rsp), %rdi
movl $400000000, %edx # imm = 0x17D78400
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
movl $400000000, %edx # imm = 0x17D78400
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
callq clock
movq %rax, %r12
movabsq $4294968320, %rdx # imm = 0x100000400
leaq 96633(%rdx), %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 120(%rsp)
movq %rcx, 112(%rsp)
movq %rdx, 104(%rsp)
movl $100000000, 36(%rsp) # imm = 0x5F5E100
leaq 120(%rsp), %rax
movq %rax, 128(%rsp)
leaq 112(%rsp), %rax
movq %rax, 136(%rsp)
leaq 104(%rsp), %rax
movq %rax, 144(%rsp)
leaq 36(%rsp), %rax
movq %rax, 152(%rsp)
leaq 88(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 64(%rsp), %rdx
leaq 56(%rsp), %rcx
callq __hipPopCallConfiguration
movq 88(%rsp), %rsi
movl 96(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z12vecAddKernelPfS_S_j, %edi
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
callq clock
subq %r12, %rax
cvtsi2sd %rax, %xmm0
movl $.L.str, %edi
movsd %xmm0, 48(%rsp) # 8-byte Spill
movb $1, %al
callq printf
movq 8(%rsp), %rsi
movl $400000000, %edx # imm = 0x17D78400
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
movl $400000000, %edi # imm = 0x17D78400
callq malloc
movq %rax, %r12
xorl %ebp, %ebp
callq clock
movq %rax, %r13
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movss (%rbx,%rbp,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
addss (%r14,%rbp,4), %xmm0
movss %xmm0, (%r12,%rbp,4)
incq %rbp
cmpq $100000000, %rbp # imm = 0x5F5E100
jne .LBB1_5
# %bb.6:
callq clock
subq %r13, %rax
xorps %xmm0, %xmm0
cvtsi2sd %rax, %xmm0
movsd %xmm0, 40(%rsp) # 8-byte Spill
movl $.L.str.1, %edi
movb $1, %al
callq printf
movsd 40(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
divsd 48(%rsp), %xmm0 # 8-byte Folded Reload
movl $.L.str.2, %edi
movb $1, %al
callq printf
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_8: # =>This Inner Loop Header: Depth=1
movss (%r15,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss (%r12,%rax,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
ucomiss %xmm1, %xmm0
jne .LBB1_9
jp .LBB1_9
# %bb.7: # in Loop: Header=BB1_8 Depth=1
incq %rax
cmpq $100000000, %rax # imm = 0x5F5E100
jne .LBB1_8
jmp .LBB1_10
.LBB1_9:
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
movl $.L.str.3, %edi
movb $2, %al
callq printf
.LBB1_10: # %.loopexit
movl $.Lstr, %edi
callq puts@PLT
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
movq %r12, %rdi
callq free
xorl %eax, %eax
addq $168, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12vecAddKernelPfS_S_j, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z12vecAddKernelPfS_S_j,@object # @_Z12vecAddKernelPfS_S_j
.section .rodata,"a",@progbits
.globl _Z12vecAddKernelPfS_S_j
.p2align 3, 0x0
_Z12vecAddKernelPfS_S_j:
.quad _Z27__device_stub__vecAddKernelPfS_S_j
.size _Z12vecAddKernelPfS_S_j, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "GPU Computation over, time: %f \n"
.size .L.str, 33
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "CPU Computation over, time: %f \n"
.size .L.str.1, 33
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "GPU is %f times faster\n"
.size .L.str.2, 24
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Wrong! %f vs %f\n"
.size .L.str.3, 17
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z12vecAddKernelPfS_S_j"
.size .L__unnamed_1, 24
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Correct!!"
.size .Lstr, 10
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__vecAddKernelPfS_S_j
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12vecAddKernelPfS_S_j
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z12vecAddKernelPfS_S_j
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.U32.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */
/* 0x000fda0003f06070 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE.U32 R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0007 */
/*0090*/ IMAD.WIDE.U32 R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x0c0fe400078e0007 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE.U32 R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0007 */
/*00d0*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */
/* 0x004fca0000000000 */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z12vecAddKernelPfS_S_j
.globl _Z12vecAddKernelPfS_S_j
.p2align 8
.type _Z12vecAddKernelPfS_S_j,@function
_Z12vecAddKernelPfS_S_j:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_u32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_mov_b32_e32 v2, 0
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12vecAddKernelPfS_S_j
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z12vecAddKernelPfS_S_j, .Lfunc_end0-_Z12vecAddKernelPfS_S_j
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12vecAddKernelPfS_S_j
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z12vecAddKernelPfS_S_j.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.