system stringclasses 7 values | input stringlengths 10 94.5k | output stringlengths 14 55.5k |
|---|---|---|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdlib.h>
#include <stdio.h>
#define MY_CUDA_CHECK( call) { \
cudaError err = call; \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} }
#define MY_CHECK_ERROR(errorMessage) { \
cudaError_t err = cudaGetLastError(); \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error: %s in file '%s' in line %i : %s.\n", \
errorMessage, __FILE__, __LINE__, cudaGetErrorString( err) );\
exit(EXIT_FAILURE); \
} \
}
void writelog(int, int, const char *, ...);
#define MAKEMATR_RC 1
#if !defined(TRUE)
enum {FALSE, TRUE};
#endif
#if !defined(MAKEMATR_RC)
#define MAKEMATR_RC 12
#endif
void **mmcuda(void ***rp, int r, int c, int s, int init) {
int i;
char **pc;
short int **psi;
int **pi;
double **pd;
char **d_pc;
short int **d_psi;
int **d_pi;
double **d_pd;
switch(s) {
case sizeof(char):
pc=(char **)malloc(r*sizeof(char *));
if(!pc) writelog(TRUE, MAKEMATR_RC, "error in makematr 1\n");
MY_CUDA_CHECK( cudaMalloc( (void **) &d_pc, r*sizeof(char*) ) );
for(i=0; i<r; i++) {
MY_CUDA_CHECK( cudaMalloc( (void **) &pc[i], c*sizeof(char) ) );
if(init) {
MY_CUDA_CHECK( cudaMemset( pc[i], 0, c*sizeof(char) ) );
}
}
MY_CUDA_CHECK( cudaMemcpy( d_pc, pc, r*sizeof(char *), cudaMemcpyHostToDevice ) );
rp[0]=(void **)d_pc;
return (void **)pc;
case sizeof(short int):
psi=(short int **)malloc(r*sizeof(short int*));
if(!psi) writelog(TRUE, MAKEMATR_RC, "error in makematr 2\n");
MY_CUDA_CHECK( cudaMalloc( (void **) &d_psi, r*sizeof(short int*) ) );
for(i=0; i<r; i++) {
MY_CUDA_CHECK( cudaMalloc( (void **) &psi[i], c*sizeof(short int) ) );
if(init) {
MY_CUDA_CHECK( cudaMemset( psi[i], 0, c*sizeof(short int) ) );
}
}
MY_CUDA_CHECK( cudaMemcpy( d_psi, psi, r*sizeof(short int*), cudaMemcpyHostToDevice ) );
rp[0]=(void **)d_psi;
return (void **)psi;
case sizeof(int):
pi=(int **)malloc(r*sizeof(int*));
if(!pi) writelog(TRUE, MAKEMATR_RC, "error in makematr 3\n");
MY_CUDA_CHECK( cudaMalloc( (void **) &d_pi, r*sizeof(int*) ) );
for(i=0; i<r; i++) {
MY_CUDA_CHECK( cudaMalloc( (void **) &pi[i], c*sizeof(int) ) );
if(init) {
MY_CUDA_CHECK( cudaMemset( pi[i], 0, c*sizeof(int) ) );
}
}
MY_CUDA_CHECK( cudaMemcpy( d_pi, pi, r*sizeof(int *), cudaMemcpyHostToDevice ) );
rp[0]=(void **)d_pi;
return (void **)pi;
case sizeof(double):
pd=(double **)malloc(r*sizeof(double*));
if(!pd) writelog(TRUE, MAKEMATR_RC, "error in makematr 4 for %d rows\n",r);
MY_CUDA_CHECK( cudaMalloc( (void **) &d_pd, r*sizeof(double*) ) );
for(i=0; i<r; i++) {
MY_CUDA_CHECK( cudaMalloc( (void **) &pd[i], c*sizeof(double) ) );
if(init) {
MY_CUDA_CHECK( cudaMemset( pd[i], 0, c*sizeof(double) ) );
}
}
MY_CUDA_CHECK( cudaMemcpy( d_pd, pd, r*sizeof(double *), cudaMemcpyHostToDevice ) );
rp[0]=(void **)d_pd;
return (void **)pd;
default:
writelog(TRUE,MAKEMATR_RC,"Unexpected size: %d\n",s);
break;
}
return NULL;
} | #include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#define MY_CUDA_CHECK( call) { \
hipError_t err = call; \
if( hipSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, hipGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} }
#define MY_CHECK_ERROR(errorMessage) { \
hipError_t err = hipGetLastError(); \
if( hipSuccess != err) { \
fprintf(stderr, "Cuda error: %s in file '%s' in line %i : %s.\n", \
errorMessage, __FILE__, __LINE__, hipGetErrorString( err) );\
exit(EXIT_FAILURE); \
} \
}
void writelog(int, int, const char *, ...);
#define MAKEMATR_RC 1
#if !defined(TRUE)
enum {FALSE, TRUE};
#endif
#if !defined(MAKEMATR_RC)
#define MAKEMATR_RC 12
#endif
void **mmcuda(void ***rp, int r, int c, int s, int init) {
int i;
char **pc;
short int **psi;
int **pi;
double **pd;
char **d_pc;
short int **d_psi;
int **d_pi;
double **d_pd;
switch(s) {
case sizeof(char):
pc=(char **)malloc(r*sizeof(char *));
if(!pc) writelog(TRUE, MAKEMATR_RC, "error in makematr 1\n");
MY_CUDA_CHECK( hipMalloc( (void **) &d_pc, r*sizeof(char*) ) );
for(i=0; i<r; i++) {
MY_CUDA_CHECK( hipMalloc( (void **) &pc[i], c*sizeof(char) ) );
if(init) {
MY_CUDA_CHECK( hipMemset( pc[i], 0, c*sizeof(char) ) );
}
}
MY_CUDA_CHECK( hipMemcpy( d_pc, pc, r*sizeof(char *), hipMemcpyHostToDevice ) );
rp[0]=(void **)d_pc;
return (void **)pc;
case sizeof(short int):
psi=(short int **)malloc(r*sizeof(short int*));
if(!psi) writelog(TRUE, MAKEMATR_RC, "error in makematr 2\n");
MY_CUDA_CHECK( hipMalloc( (void **) &d_psi, r*sizeof(short int*) ) );
for(i=0; i<r; i++) {
MY_CUDA_CHECK( hipMalloc( (void **) &psi[i], c*sizeof(short int) ) );
if(init) {
MY_CUDA_CHECK( hipMemset( psi[i], 0, c*sizeof(short int) ) );
}
}
MY_CUDA_CHECK( hipMemcpy( d_psi, psi, r*sizeof(short int*), hipMemcpyHostToDevice ) );
rp[0]=(void **)d_psi;
return (void **)psi;
case sizeof(int):
pi=(int **)malloc(r*sizeof(int*));
if(!pi) writelog(TRUE, MAKEMATR_RC, "error in makematr 3\n");
MY_CUDA_CHECK( hipMalloc( (void **) &d_pi, r*sizeof(int*) ) );
for(i=0; i<r; i++) {
MY_CUDA_CHECK( hipMalloc( (void **) &pi[i], c*sizeof(int) ) );
if(init) {
MY_CUDA_CHECK( hipMemset( pi[i], 0, c*sizeof(int) ) );
}
}
MY_CUDA_CHECK( hipMemcpy( d_pi, pi, r*sizeof(int *), hipMemcpyHostToDevice ) );
rp[0]=(void **)d_pi;
return (void **)pi;
case sizeof(double):
pd=(double **)malloc(r*sizeof(double*));
if(!pd) writelog(TRUE, MAKEMATR_RC, "error in makematr 4 for %d rows\n",r);
MY_CUDA_CHECK( hipMalloc( (void **) &d_pd, r*sizeof(double*) ) );
for(i=0; i<r; i++) {
MY_CUDA_CHECK( hipMalloc( (void **) &pd[i], c*sizeof(double) ) );
if(init) {
MY_CUDA_CHECK( hipMemset( pd[i], 0, c*sizeof(double) ) );
}
}
MY_CUDA_CHECK( hipMemcpy( d_pd, pd, r*sizeof(double *), hipMemcpyHostToDevice ) );
rp[0]=(void **)d_pd;
return (void **)pd;
default:
writelog(TRUE,MAKEMATR_RC,"Unexpected size: %d\n",s);
break;
}
return NULL;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#define MY_CUDA_CHECK( call) { \
hipError_t err = call; \
if( hipSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, hipGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} }
#define MY_CHECK_ERROR(errorMessage) { \
hipError_t err = hipGetLastError(); \
if( hipSuccess != err) { \
fprintf(stderr, "Cuda error: %s in file '%s' in line %i : %s.\n", \
errorMessage, __FILE__, __LINE__, hipGetErrorString( err) );\
exit(EXIT_FAILURE); \
} \
}
void writelog(int, int, const char *, ...);
#define MAKEMATR_RC 1
#if !defined(TRUE)
enum {FALSE, TRUE};
#endif
#if !defined(MAKEMATR_RC)
#define MAKEMATR_RC 12
#endif
void **mmcuda(void ***rp, int r, int c, int s, int init) {
int i;
char **pc;
short int **psi;
int **pi;
double **pd;
char **d_pc;
short int **d_psi;
int **d_pi;
double **d_pd;
switch(s) {
case sizeof(char):
pc=(char **)malloc(r*sizeof(char *));
if(!pc) writelog(TRUE, MAKEMATR_RC, "error in makematr 1\n");
MY_CUDA_CHECK( hipMalloc( (void **) &d_pc, r*sizeof(char*) ) );
for(i=0; i<r; i++) {
MY_CUDA_CHECK( hipMalloc( (void **) &pc[i], c*sizeof(char) ) );
if(init) {
MY_CUDA_CHECK( hipMemset( pc[i], 0, c*sizeof(char) ) );
}
}
MY_CUDA_CHECK( hipMemcpy( d_pc, pc, r*sizeof(char *), hipMemcpyHostToDevice ) );
rp[0]=(void **)d_pc;
return (void **)pc;
case sizeof(short int):
psi=(short int **)malloc(r*sizeof(short int*));
if(!psi) writelog(TRUE, MAKEMATR_RC, "error in makematr 2\n");
MY_CUDA_CHECK( hipMalloc( (void **) &d_psi, r*sizeof(short int*) ) );
for(i=0; i<r; i++) {
MY_CUDA_CHECK( hipMalloc( (void **) &psi[i], c*sizeof(short int) ) );
if(init) {
MY_CUDA_CHECK( hipMemset( psi[i], 0, c*sizeof(short int) ) );
}
}
MY_CUDA_CHECK( hipMemcpy( d_psi, psi, r*sizeof(short int*), hipMemcpyHostToDevice ) );
rp[0]=(void **)d_psi;
return (void **)psi;
case sizeof(int):
pi=(int **)malloc(r*sizeof(int*));
if(!pi) writelog(TRUE, MAKEMATR_RC, "error in makematr 3\n");
MY_CUDA_CHECK( hipMalloc( (void **) &d_pi, r*sizeof(int*) ) );
for(i=0; i<r; i++) {
MY_CUDA_CHECK( hipMalloc( (void **) &pi[i], c*sizeof(int) ) );
if(init) {
MY_CUDA_CHECK( hipMemset( pi[i], 0, c*sizeof(int) ) );
}
}
MY_CUDA_CHECK( hipMemcpy( d_pi, pi, r*sizeof(int *), hipMemcpyHostToDevice ) );
rp[0]=(void **)d_pi;
return (void **)pi;
case sizeof(double):
pd=(double **)malloc(r*sizeof(double*));
if(!pd) writelog(TRUE, MAKEMATR_RC, "error in makematr 4 for %d rows\n",r);
MY_CUDA_CHECK( hipMalloc( (void **) &d_pd, r*sizeof(double*) ) );
for(i=0; i<r; i++) {
MY_CUDA_CHECK( hipMalloc( (void **) &pd[i], c*sizeof(double) ) );
if(init) {
MY_CUDA_CHECK( hipMemset( pd[i], 0, c*sizeof(double) ) );
}
}
MY_CUDA_CHECK( hipMemcpy( d_pd, pd, r*sizeof(double *), hipMemcpyHostToDevice ) );
rp[0]=(void **)d_pd;
return (void **)pd;
default:
writelog(TRUE,MAKEMATR_RC,"Unexpected size: %d\n",s);
break;
}
return NULL;
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#define MY_CUDA_CHECK( call) { \
hipError_t err = call; \
if( hipSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, hipGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} }
#define MY_CHECK_ERROR(errorMessage) { \
hipError_t err = hipGetLastError(); \
if( hipSuccess != err) { \
fprintf(stderr, "Cuda error: %s in file '%s' in line %i : %s.\n", \
errorMessage, __FILE__, __LINE__, hipGetErrorString( err) );\
exit(EXIT_FAILURE); \
} \
}
void writelog(int, int, const char *, ...);
#define MAKEMATR_RC 1
#if !defined(TRUE)
enum {FALSE, TRUE};
#endif
#if !defined(MAKEMATR_RC)
#define MAKEMATR_RC 12
#endif
void **mmcuda(void ***rp, int r, int c, int s, int init) {
int i;
char **pc;
short int **psi;
int **pi;
double **pd;
char **d_pc;
short int **d_psi;
int **d_pi;
double **d_pd;
switch(s) {
case sizeof(char):
pc=(char **)malloc(r*sizeof(char *));
if(!pc) writelog(TRUE, MAKEMATR_RC, "error in makematr 1\n");
MY_CUDA_CHECK( hipMalloc( (void **) &d_pc, r*sizeof(char*) ) );
for(i=0; i<r; i++) {
MY_CUDA_CHECK( hipMalloc( (void **) &pc[i], c*sizeof(char) ) );
if(init) {
MY_CUDA_CHECK( hipMemset( pc[i], 0, c*sizeof(char) ) );
}
}
MY_CUDA_CHECK( hipMemcpy( d_pc, pc, r*sizeof(char *), hipMemcpyHostToDevice ) );
rp[0]=(void **)d_pc;
return (void **)pc;
case sizeof(short int):
psi=(short int **)malloc(r*sizeof(short int*));
if(!psi) writelog(TRUE, MAKEMATR_RC, "error in makematr 2\n");
MY_CUDA_CHECK( hipMalloc( (void **) &d_psi, r*sizeof(short int*) ) );
for(i=0; i<r; i++) {
MY_CUDA_CHECK( hipMalloc( (void **) &psi[i], c*sizeof(short int) ) );
if(init) {
MY_CUDA_CHECK( hipMemset( psi[i], 0, c*sizeof(short int) ) );
}
}
MY_CUDA_CHECK( hipMemcpy( d_psi, psi, r*sizeof(short int*), hipMemcpyHostToDevice ) );
rp[0]=(void **)d_psi;
return (void **)psi;
case sizeof(int):
pi=(int **)malloc(r*sizeof(int*));
if(!pi) writelog(TRUE, MAKEMATR_RC, "error in makematr 3\n");
MY_CUDA_CHECK( hipMalloc( (void **) &d_pi, r*sizeof(int*) ) );
for(i=0; i<r; i++) {
MY_CUDA_CHECK( hipMalloc( (void **) &pi[i], c*sizeof(int) ) );
if(init) {
MY_CUDA_CHECK( hipMemset( pi[i], 0, c*sizeof(int) ) );
}
}
MY_CUDA_CHECK( hipMemcpy( d_pi, pi, r*sizeof(int *), hipMemcpyHostToDevice ) );
rp[0]=(void **)d_pi;
return (void **)pi;
case sizeof(double):
pd=(double **)malloc(r*sizeof(double*));
if(!pd) writelog(TRUE, MAKEMATR_RC, "error in makematr 4 for %d rows\n",r);
MY_CUDA_CHECK( hipMalloc( (void **) &d_pd, r*sizeof(double*) ) );
for(i=0; i<r; i++) {
MY_CUDA_CHECK( hipMalloc( (void **) &pd[i], c*sizeof(double) ) );
if(init) {
MY_CUDA_CHECK( hipMemset( pd[i], 0, c*sizeof(double) ) );
}
}
MY_CUDA_CHECK( hipMemcpy( d_pd, pd, r*sizeof(double *), hipMemcpyHostToDevice ) );
rp[0]=(void **)d_pd;
return (void **)pd;
default:
writelog(TRUE,MAKEMATR_RC,"Unexpected size: %d\n",s);
break;
}
return NULL;
} | .text
.file "mmcuda.hip"
.globl _Z6mmcudaPPPviiii # -- Begin function _Z6mmcudaPPPviiii
.p2align 4, 0x90
.type _Z6mmcudaPPPviiii,@function
_Z6mmcudaPPPviiii: # @_Z6mmcudaPPPviiii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $24, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
# kill: def $ecx killed $ecx def $rcx
leal -1(%rcx), %eax
cmpl $7, %eax
ja .LBB0_57
# %bb.1:
movl %r8d, %ebp
movl %edx, %r12d
movl %esi, %r15d
jmpq *.LJTI0_0(,%rax,8)
.LBB0_2:
movq %rdi, 16(%rsp) # 8-byte Spill
movslq %r15d, %r13
shlq $3, %r13
movq %r13, %rdi
callq malloc
movq %rax, %r14
testq %rax, %rax
jne .LBB0_4
# %bb.3:
movl $.L.str, %edx
movl $1, %edi
movl $1, %esi
xorl %eax, %eax
callq _Z8writelogiiPKcz
.LBB0_4:
leaq 8(%rsp), %rdi
movq %r13, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB0_9
# %bb.5: # %.preheader
testl %r15d, %r15d
jle .LBB0_15
# %bb.6: # %.lr.ph165
movslq %r12d, %r12
movl %r15d, %ebx
movq %r14, %r15
jmp .LBB0_7
.p2align 4, 0x90
.LBB0_14: # in Loop: Header=BB0_7 Depth=1
addq $8, %r15
decq %rbx
je .LBB0_15
.LBB0_7: # =>This Inner Loop Header: Depth=1
movq %r15, %rdi
movq %r12, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB0_8
# %bb.11: # in Loop: Header=BB0_7 Depth=1
testl %ebp, %ebp
je .LBB0_14
# %bb.12: # in Loop: Header=BB0_7 Depth=1
movq (%r15), %rdi
xorl %esi, %esi
movq %r12, %rdx
callq hipMemset
testl %eax, %eax
je .LBB0_14
# %bb.13:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $52, %ecx
jmp .LBB0_10
.LBB0_30:
movq %rdi, 16(%rsp) # 8-byte Spill
movslq %r15d, %r13
shlq $3, %r13
movq %r13, %rdi
callq malloc
movq %rax, %r14
testq %rax, %rax
jne .LBB0_32
# %bb.31:
movl $.L.str.4, %edx
movl $1, %edi
movl $1, %esi
xorl %eax, %eax
callq _Z8writelogiiPKcz
.LBB0_32:
leaq 8(%rsp), %rdi
movq %r13, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB0_60
# %bb.33: # %.preheader144
testl %r15d, %r15d
jle .LBB0_41
# %bb.34: # %.lr.ph159
movslq %r12d, %r12
shlq $2, %r12
movl %r15d, %ebx
movq %r14, %r15
jmp .LBB0_35
.p2align 4, 0x90
.LBB0_40: # in Loop: Header=BB0_35 Depth=1
addq $8, %r15
decq %rbx
je .LBB0_41
.LBB0_35: # =>This Inner Loop Header: Depth=1
movq %r15, %rdi
movq %r12, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB0_36
# %bb.37: # in Loop: Header=BB0_35 Depth=1
testl %ebp, %ebp
je .LBB0_40
# %bb.38: # in Loop: Header=BB0_35 Depth=1
movq (%r15), %rdi
xorl %esi, %esi
movq %r12, %rdx
callq hipMemset
testl %eax, %eax
je .LBB0_40
# %bb.39:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $78, %ecx
jmp .LBB0_10
.LBB0_17:
movq %rdi, 16(%rsp) # 8-byte Spill
movslq %r15d, %r13
shlq $3, %r13
movq %r13, %rdi
callq malloc
movq %rax, %r14
testq %rax, %rax
jne .LBB0_19
# %bb.18:
movl $.L.str.3, %edx
movl $1, %edi
movl $1, %esi
xorl %eax, %eax
callq _Z8writelogiiPKcz
.LBB0_19:
leaq 8(%rsp), %rdi
movq %r13, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB0_59
# %bb.20: # %.preheader143
testl %r15d, %r15d
jle .LBB0_28
# %bb.21: # %.lr.ph162
movslq %r12d, %r12
addq %r12, %r12
movl %r15d, %ebx
movq %r14, %r15
jmp .LBB0_22
.p2align 4, 0x90
.LBB0_27: # in Loop: Header=BB0_22 Depth=1
addq $8, %r15
decq %rbx
je .LBB0_28
.LBB0_22: # =>This Inner Loop Header: Depth=1
movq %r15, %rdi
movq %r12, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB0_23
# %bb.24: # in Loop: Header=BB0_22 Depth=1
testl %ebp, %ebp
je .LBB0_27
# %bb.25: # in Loop: Header=BB0_22 Depth=1
movq (%r15), %rdi
xorl %esi, %esi
movq %r12, %rdx
callq hipMemset
testl %eax, %eax
je .LBB0_27
# %bb.26:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $65, %ecx
jmp .LBB0_10
.LBB0_43:
movq %rdi, 16(%rsp) # 8-byte Spill
movslq %r15d, %r13
shlq $3, %r13
movq %r13, %rdi
callq malloc
movq %rax, %r14
testq %rax, %rax
jne .LBB0_45
# %bb.44:
movl $.L.str.5, %edx
movl $1, %edi
movl $1, %esi
movl %r15d, %ecx
xorl %eax, %eax
callq _Z8writelogiiPKcz
.LBB0_45:
leaq 8(%rsp), %rdi
movq %r13, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB0_61
# %bb.46: # %.preheader145
testl %r15d, %r15d
jle .LBB0_54
# %bb.47: # %.lr.ph
movslq %r12d, %r12
shlq $3, %r12
movl %r15d, %ebx
movq %r14, %r15
jmp .LBB0_48
.p2align 4, 0x90
.LBB0_53: # in Loop: Header=BB0_48 Depth=1
addq $8, %r15
decq %rbx
je .LBB0_54
.LBB0_48: # =>This Inner Loop Header: Depth=1
movq %r15, %rdi
movq %r12, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB0_49
# %bb.50: # in Loop: Header=BB0_48 Depth=1
testl %ebp, %ebp
je .LBB0_53
# %bb.51: # in Loop: Header=BB0_48 Depth=1
movq (%r15), %rdi
xorl %esi, %esi
movq %r12, %rdx
callq hipMemset
testl %eax, %eax
je .LBB0_53
# %bb.52:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $91, %ecx
jmp .LBB0_10
.LBB0_57:
xorl %r14d, %r14d
movl $.L.str.6, %edx
movl $1, %edi
movl $1, %esi
# kill: def $ecx killed $ecx killed $rcx
xorl %eax, %eax
callq _Z8writelogiiPKcz
jmp .LBB0_58
.LBB0_41: # %._crit_edge160
movq 8(%rsp), %rdi
movq %r14, %rsi
movq %r13, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
je .LBB0_56
# %bb.42:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $81, %ecx
jmp .LBB0_10
.LBB0_15: # %._crit_edge166
movq 8(%rsp), %rdi
movq %r14, %rsi
movq %r13, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
je .LBB0_56
# %bb.16:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $55, %ecx
jmp .LBB0_10
.LBB0_28: # %._crit_edge163
movq 8(%rsp), %rdi
movq %r14, %rsi
movq %r13, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
je .LBB0_56
# %bb.29:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $68, %ecx
jmp .LBB0_10
.LBB0_54: # %._crit_edge
movq 8(%rsp), %rdi
movq %r14, %rsi
movq %r13, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB0_55
.LBB0_56:
movq 8(%rsp), %rax
movq 16(%rsp), %rcx # 8-byte Reload
movq %rax, (%rcx)
.LBB0_58:
movq %r14, %rax
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB0_36:
.cfi_def_cfa_offset 80
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $76, %ecx
jmp .LBB0_10
.LBB0_8:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $50, %ecx
jmp .LBB0_10
.LBB0_23:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $63, %ecx
jmp .LBB0_10
.LBB0_49:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $89, %ecx
.LBB0_10:
movq %rax, %r8
xorl %eax, %eax
callq fprintf
movl $1, %edi
callq exit
.LBB0_60:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $74, %ecx
jmp .LBB0_10
.LBB0_9:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $48, %ecx
jmp .LBB0_10
.LBB0_59:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $61, %ecx
jmp .LBB0_10
.LBB0_61:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $87, %ecx
jmp .LBB0_10
.LBB0_55:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $94, %ecx
jmp .LBB0_10
.Lfunc_end0:
.size _Z6mmcudaPPPviiii, .Lfunc_end0-_Z6mmcudaPPPviiii
.cfi_endproc
.section .rodata,"a",@progbits
.p2align 3, 0x0
.LJTI0_0:
.quad .LBB0_2
.quad .LBB0_17
.quad .LBB0_57
.quad .LBB0_30
.quad .LBB0_57
.quad .LBB0_57
.quad .LBB0_57
.quad .LBB0_43
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "error in makematr 1\n"
.size .L.str, 21
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Cuda error in file '%s' in line %i : %s.\n"
.size .L.str.1, 42
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/mbernaschi/StrongErgodicityBreaking/master/SBcode/GPU/mmcuda.hip"
.size .L.str.2, 122
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "error in makematr 2\n"
.size .L.str.3, 21
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "error in makematr 3\n"
.size .L.str.4, 21
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "error in makematr 4 for %d rows\n"
.size .L.str.5, 33
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "Unexpected size: %d\n"
.size .L.str.6, 21
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00138fb7_00000000-6_mmcuda.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "error in makematr 1\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "/home/ubuntu/Datasets/stackv2/train-structured/mbernaschi/StrongErgodicityBreaking/master/SBcode/GPU/mmcuda.cu"
.align 8
.LC2:
.string "Cuda error in file '%s' in line %i : %s.\n"
.section .rodata.str1.1
.LC3:
.string "error in makematr 2\n"
.LC4:
.string "error in makematr 3\n"
.section .rodata.str1.8
.align 8
.LC5:
.string "error in makematr 4 for %d rows\n"
.section .rodata.str1.1
.LC6:
.string "Unexpected size: %d\n"
.text
.globl _Z6mmcudaPPPviiii
.type _Z6mmcudaPPPviiii, @function
_Z6mmcudaPPPviiii:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rdi, (%rsp)
movl %esi, %r14d
movl %edx, %r13d
movl %r8d, %ebp
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
cmpl $4, %ecx
je .L4
jg .L5
cmpl $1, %ecx
je .L6
cmpl $2, %ecx
jne .L8
movslq %esi, %rax
salq $3, %rax
movq %rax, 8(%rsp)
movq %rax, %rdi
call malloc@PLT
movq %rax, %r12
testq %rax, %rax
je .L50
.L19:
leaq 16(%rsp), %rdi
movq 8(%rsp), %rsi
call cudaMalloc@PLT
testl %eax, %eax
jne .L20
testl %r14d, %r14d
jle .L22
movslq %r13d, %r13
addq %r13, %r13
movq %r12, %rbx
movq 8(%rsp), %rax
leaq (%rax,%r12), %r15
jmp .L25
.L5:
cmpl $8, %ecx
jne .L8
movslq %esi, %rax
salq $3, %rax
movq %rax, 8(%rsp)
movq %rax, %rdi
call malloc@PLT
movq %rax, %r12
testq %rax, %rax
je .L51
.L35:
leaq 16(%rsp), %rdi
movq 8(%rsp), %rsi
call cudaMalloc@PLT
testl %eax, %eax
jne .L36
testl %r14d, %r14d
jle .L38
movslq %r13d, %r13
salq $3, %r13
movq %r12, %rbx
movq 8(%rsp), %rax
leaq (%rax,%r12), %r15
jmp .L41
.L6:
movslq %esi, %rax
salq $3, %rax
movq %rax, 8(%rsp)
movq %rax, %rdi
call malloc@PLT
movq %rax, %r12
testq %rax, %rax
je .L52
.L10:
leaq 16(%rsp), %rdi
movq 8(%rsp), %r15
movq %r15, %rsi
call cudaMalloc@PLT
testl %eax, %eax
jne .L11
movq %r12, %rbx
addq %r12, %r15
movslq %r13d, %r13
testl %r14d, %r14d
jg .L16
.L13:
movl $1, %ecx
movq 8(%rsp), %rdx
movq %r12, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L53
movq 16(%rsp), %rax
movq (%rsp), %rcx
movq %rax, (%rcx)
jmp .L3
.L52:
leaq .LC0(%rip), %rdx
movl $1, %esi
movl $1, %edi
movl $0, %eax
call _Z8writelogiiPKcz@PLT
jmp .L10
.L11:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $46, %r8d
leaq .LC1(%rip), %rcx
leaq .LC2(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L54:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $48, %r8d
leaq .LC1(%rip), %rcx
leaq .LC2(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L15:
addq $8, %rbx
cmpq %r15, %rbx
je .L13
.L16:
movq %r13, %rsi
movq %rbx, %rdi
call cudaMalloc@PLT
testl %eax, %eax
jne .L54
testl %ebp, %ebp
je .L15
movq (%rbx), %rdi
movq %r13, %rdx
movl $0, %esi
call cudaMemset@PLT
testl %eax, %eax
je .L15
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $50, %r8d
leaq .LC1(%rip), %rcx
leaq .LC2(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L53:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $53, %r8d
leaq .LC1(%rip), %rcx
leaq .LC2(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L50:
leaq .LC3(%rip), %rdx
movl $1, %esi
movl $1, %edi
movl $0, %eax
call _Z8writelogiiPKcz@PLT
jmp .L19
.L20:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $59, %r8d
leaq .LC1(%rip), %rcx
leaq .LC2(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L55:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $61, %r8d
leaq .LC1(%rip), %rcx
leaq .LC2(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L24:
addq $8, %rbx
cmpq %r15, %rbx
je .L22
.L25:
movq %r13, %rsi
movq %rbx, %rdi
call cudaMalloc@PLT
testl %eax, %eax
jne .L55
testl %ebp, %ebp
je .L24
movq (%rbx), %rdi
movq %r13, %rdx
movl $0, %esi
call cudaMemset@PLT
testl %eax, %eax
je .L24
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $63, %r8d
leaq .LC1(%rip), %rcx
leaq .LC2(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L22:
movl $1, %ecx
movq 8(%rsp), %rdx
movq %r12, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L56
movq 16(%rsp), %rax
movq (%rsp), %rcx
movq %rax, (%rcx)
jmp .L3
.L56:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $66, %r8d
leaq .LC1(%rip), %rcx
leaq .LC2(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L4:
movslq %esi, %rax
salq $3, %rax
movq %rax, 8(%rsp)
movq %rax, %rdi
call malloc@PLT
movq %rax, %r12
testq %rax, %rax
je .L57
.L27:
leaq 16(%rsp), %rdi
movq 8(%rsp), %rsi
call cudaMalloc@PLT
testl %eax, %eax
jne .L28
testl %r14d, %r14d
jle .L30
movslq %r13d, %r13
salq $2, %r13
movq %r12, %rbx
movq 8(%rsp), %rax
leaq (%rax,%r12), %r15
jmp .L33
.L57:
leaq .LC4(%rip), %rdx
movl $1, %esi
movl $1, %edi
movl $0, %eax
call _Z8writelogiiPKcz@PLT
jmp .L27
.L28:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $72, %r8d
leaq .LC1(%rip), %rcx
leaq .LC2(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L58:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $74, %r8d
leaq .LC1(%rip), %rcx
leaq .LC2(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L32:
addq $8, %rbx
cmpq %r15, %rbx
je .L30
.L33:
movq %r13, %rsi
movq %rbx, %rdi
call cudaMalloc@PLT
testl %eax, %eax
jne .L58
testl %ebp, %ebp
je .L32
movq (%rbx), %rdi
movq %r13, %rdx
movl $0, %esi
call cudaMemset@PLT
testl %eax, %eax
je .L32
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $76, %r8d
leaq .LC1(%rip), %rcx
leaq .LC2(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L30:
movl $1, %ecx
movq 8(%rsp), %rdx
movq %r12, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L59
movq 16(%rsp), %rax
movq (%rsp), %rcx
movq %rax, (%rcx)
jmp .L3
.L59:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $79, %r8d
leaq .LC1(%rip), %rcx
leaq .LC2(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L51:
movl %r14d, %ecx
leaq .LC5(%rip), %rdx
movl $1, %esi
movl $1, %edi
movl $0, %eax
call _Z8writelogiiPKcz@PLT
jmp .L35
.L36:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $85, %r8d
leaq .LC1(%rip), %rcx
leaq .LC2(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L60:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $87, %r8d
leaq .LC1(%rip), %rcx
leaq .LC2(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L40:
addq $8, %rbx
cmpq %r15, %rbx
je .L38
.L41:
movq %r13, %rsi
movq %rbx, %rdi
call cudaMalloc@PLT
testl %eax, %eax
jne .L60
testl %ebp, %ebp
je .L40
movq (%rbx), %rdi
movq %r13, %rdx
movl $0, %esi
call cudaMemset@PLT
testl %eax, %eax
je .L40
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $89, %r8d
leaq .LC1(%rip), %rcx
leaq .LC2(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L38:
movl $1, %ecx
movq 8(%rsp), %rdx
movq %r12, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L61
movq 16(%rsp), %rax
movq (%rsp), %rcx
movq %rax, (%rcx)
jmp .L3
.L61:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $92, %r8d
leaq .LC1(%rip), %rcx
leaq .LC2(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L8:
leaq .LC6(%rip), %rdx
movl $1, %esi
movl $1, %edi
movl $0, %eax
call _Z8writelogiiPKcz@PLT
movl $0, %r12d
.L3:
movq 24(%rsp), %rax
subq %fs:40, %rax
jne .L62
movq %r12, %rax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L62:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z6mmcudaPPPviiii, .-_Z6mmcudaPPPviiii
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "mmcuda.hip"
.globl _Z6mmcudaPPPviiii # -- Begin function _Z6mmcudaPPPviiii
.p2align 4, 0x90
.type _Z6mmcudaPPPviiii,@function
_Z6mmcudaPPPviiii: # @_Z6mmcudaPPPviiii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $24, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
# kill: def $ecx killed $ecx def $rcx
leal -1(%rcx), %eax
cmpl $7, %eax
ja .LBB0_57
# %bb.1:
movl %r8d, %ebp
movl %edx, %r12d
movl %esi, %r15d
jmpq *.LJTI0_0(,%rax,8)
.LBB0_2:
movq %rdi, 16(%rsp) # 8-byte Spill
movslq %r15d, %r13
shlq $3, %r13
movq %r13, %rdi
callq malloc
movq %rax, %r14
testq %rax, %rax
jne .LBB0_4
# %bb.3:
movl $.L.str, %edx
movl $1, %edi
movl $1, %esi
xorl %eax, %eax
callq _Z8writelogiiPKcz
.LBB0_4:
leaq 8(%rsp), %rdi
movq %r13, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB0_9
# %bb.5: # %.preheader
testl %r15d, %r15d
jle .LBB0_15
# %bb.6: # %.lr.ph165
movslq %r12d, %r12
movl %r15d, %ebx
movq %r14, %r15
jmp .LBB0_7
.p2align 4, 0x90
.LBB0_14: # in Loop: Header=BB0_7 Depth=1
addq $8, %r15
decq %rbx
je .LBB0_15
.LBB0_7: # =>This Inner Loop Header: Depth=1
movq %r15, %rdi
movq %r12, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB0_8
# %bb.11: # in Loop: Header=BB0_7 Depth=1
testl %ebp, %ebp
je .LBB0_14
# %bb.12: # in Loop: Header=BB0_7 Depth=1
movq (%r15), %rdi
xorl %esi, %esi
movq %r12, %rdx
callq hipMemset
testl %eax, %eax
je .LBB0_14
# %bb.13:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $52, %ecx
jmp .LBB0_10
.LBB0_30:
movq %rdi, 16(%rsp) # 8-byte Spill
movslq %r15d, %r13
shlq $3, %r13
movq %r13, %rdi
callq malloc
movq %rax, %r14
testq %rax, %rax
jne .LBB0_32
# %bb.31:
movl $.L.str.4, %edx
movl $1, %edi
movl $1, %esi
xorl %eax, %eax
callq _Z8writelogiiPKcz
.LBB0_32:
leaq 8(%rsp), %rdi
movq %r13, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB0_60
# %bb.33: # %.preheader144
testl %r15d, %r15d
jle .LBB0_41
# %bb.34: # %.lr.ph159
movslq %r12d, %r12
shlq $2, %r12
movl %r15d, %ebx
movq %r14, %r15
jmp .LBB0_35
.p2align 4, 0x90
.LBB0_40: # in Loop: Header=BB0_35 Depth=1
addq $8, %r15
decq %rbx
je .LBB0_41
.LBB0_35: # =>This Inner Loop Header: Depth=1
movq %r15, %rdi
movq %r12, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB0_36
# %bb.37: # in Loop: Header=BB0_35 Depth=1
testl %ebp, %ebp
je .LBB0_40
# %bb.38: # in Loop: Header=BB0_35 Depth=1
movq (%r15), %rdi
xorl %esi, %esi
movq %r12, %rdx
callq hipMemset
testl %eax, %eax
je .LBB0_40
# %bb.39:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $78, %ecx
jmp .LBB0_10
.LBB0_17:
movq %rdi, 16(%rsp) # 8-byte Spill
movslq %r15d, %r13
shlq $3, %r13
movq %r13, %rdi
callq malloc
movq %rax, %r14
testq %rax, %rax
jne .LBB0_19
# %bb.18:
movl $.L.str.3, %edx
movl $1, %edi
movl $1, %esi
xorl %eax, %eax
callq _Z8writelogiiPKcz
.LBB0_19:
leaq 8(%rsp), %rdi
movq %r13, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB0_59
# %bb.20: # %.preheader143
testl %r15d, %r15d
jle .LBB0_28
# %bb.21: # %.lr.ph162
movslq %r12d, %r12
addq %r12, %r12
movl %r15d, %ebx
movq %r14, %r15
jmp .LBB0_22
.p2align 4, 0x90
.LBB0_27: # in Loop: Header=BB0_22 Depth=1
addq $8, %r15
decq %rbx
je .LBB0_28
.LBB0_22: # =>This Inner Loop Header: Depth=1
movq %r15, %rdi
movq %r12, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB0_23
# %bb.24: # in Loop: Header=BB0_22 Depth=1
testl %ebp, %ebp
je .LBB0_27
# %bb.25: # in Loop: Header=BB0_22 Depth=1
movq (%r15), %rdi
xorl %esi, %esi
movq %r12, %rdx
callq hipMemset
testl %eax, %eax
je .LBB0_27
# %bb.26:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $65, %ecx
jmp .LBB0_10
.LBB0_43:
movq %rdi, 16(%rsp) # 8-byte Spill
movslq %r15d, %r13
shlq $3, %r13
movq %r13, %rdi
callq malloc
movq %rax, %r14
testq %rax, %rax
jne .LBB0_45
# %bb.44:
movl $.L.str.5, %edx
movl $1, %edi
movl $1, %esi
movl %r15d, %ecx
xorl %eax, %eax
callq _Z8writelogiiPKcz
.LBB0_45:
leaq 8(%rsp), %rdi
movq %r13, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB0_61
# %bb.46: # %.preheader145
testl %r15d, %r15d
jle .LBB0_54
# %bb.47: # %.lr.ph
movslq %r12d, %r12
shlq $3, %r12
movl %r15d, %ebx
movq %r14, %r15
jmp .LBB0_48
.p2align 4, 0x90
.LBB0_53: # in Loop: Header=BB0_48 Depth=1
addq $8, %r15
decq %rbx
je .LBB0_54
.LBB0_48: # =>This Inner Loop Header: Depth=1
movq %r15, %rdi
movq %r12, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB0_49
# %bb.50: # in Loop: Header=BB0_48 Depth=1
testl %ebp, %ebp
je .LBB0_53
# %bb.51: # in Loop: Header=BB0_48 Depth=1
movq (%r15), %rdi
xorl %esi, %esi
movq %r12, %rdx
callq hipMemset
testl %eax, %eax
je .LBB0_53
# %bb.52:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $91, %ecx
jmp .LBB0_10
.LBB0_57:
xorl %r14d, %r14d
movl $.L.str.6, %edx
movl $1, %edi
movl $1, %esi
# kill: def $ecx killed $ecx killed $rcx
xorl %eax, %eax
callq _Z8writelogiiPKcz
jmp .LBB0_58
.LBB0_41: # %._crit_edge160
movq 8(%rsp), %rdi
movq %r14, %rsi
movq %r13, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
je .LBB0_56
# %bb.42:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $81, %ecx
jmp .LBB0_10
.LBB0_15: # %._crit_edge166
movq 8(%rsp), %rdi
movq %r14, %rsi
movq %r13, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
je .LBB0_56
# %bb.16:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $55, %ecx
jmp .LBB0_10
.LBB0_28: # %._crit_edge163
movq 8(%rsp), %rdi
movq %r14, %rsi
movq %r13, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
je .LBB0_56
# %bb.29:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $68, %ecx
jmp .LBB0_10
.LBB0_54: # %._crit_edge
movq 8(%rsp), %rdi
movq %r14, %rsi
movq %r13, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB0_55
.LBB0_56:
movq 8(%rsp), %rax
movq 16(%rsp), %rcx # 8-byte Reload
movq %rax, (%rcx)
.LBB0_58:
movq %r14, %rax
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB0_36:
.cfi_def_cfa_offset 80
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $76, %ecx
jmp .LBB0_10
.LBB0_8:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $50, %ecx
jmp .LBB0_10
.LBB0_23:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $63, %ecx
jmp .LBB0_10
.LBB0_49:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $89, %ecx
.LBB0_10:
movq %rax, %r8
xorl %eax, %eax
callq fprintf
movl $1, %edi
callq exit
.LBB0_60:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $74, %ecx
jmp .LBB0_10
.LBB0_9:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $48, %ecx
jmp .LBB0_10
.LBB0_59:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $61, %ecx
jmp .LBB0_10
.LBB0_61:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $87, %ecx
jmp .LBB0_10
.LBB0_55:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %esi
movl $.L.str.2, %edx
movq %rbx, %rdi
movl $94, %ecx
jmp .LBB0_10
.Lfunc_end0:
.size _Z6mmcudaPPPviiii, .Lfunc_end0-_Z6mmcudaPPPviiii
.cfi_endproc
.section .rodata,"a",@progbits
.p2align 3, 0x0
.LJTI0_0:
.quad .LBB0_2
.quad .LBB0_17
.quad .LBB0_57
.quad .LBB0_30
.quad .LBB0_57
.quad .LBB0_57
.quad .LBB0_57
.quad .LBB0_43
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "error in makematr 1\n"
.size .L.str, 21
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Cuda error in file '%s' in line %i : %s.\n"
.size .L.str.1, 42
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/mbernaschi/StrongErgodicityBreaking/master/SBcode/GPU/mmcuda.hip"
.size .L.str.2, 122
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "error in makematr 2\n"
.size .L.str.3, 21
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "error in makematr 3\n"
.size .L.str.4, 21
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "error in makematr 4 for %d rows\n"
.size .L.str.5, 33
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "Unexpected size: %d\n"
.size .L.str.6, 21
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
#include <math.h>
#include <vector>
#include <thrust/extrema.h>
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
#include <thrust/device_vector.h>
#define BLOCK_SIZE 1024
using namespace std;
const int SPLIT_SIZE = 512;
const int POCKET_SIZE = 1024;
// Aborts the process if a CUDA runtime error is pending (e.g. from a
// failed kernel launch).  Call right after kernel launches.
#define cuda_check_error() { \
 cudaError_t e=cudaGetLastError(); \
 if(e!=cudaSuccess) { \
   printf("Cuda failure %s\n", cudaGetErrorString(e)); \
   exit(0); \
 } \
}
// Describes a partition of an array into contiguous buckets:
// `buckets` is a malloc'd array of start offsets (the last entry is the
// end offset) and `buckets_count` is the number of entries in it.
struct bucket
{
    int * buckets;
    int buckets_count;
};
// Strict-weak float ordering usable from both host and device code;
// passed to thrust::min_element / thrust::max_element below.
struct comparator {
    __host__ __device__ bool operator()(float a, float b) {
        return a < b;
    }
};
// Returns the largest of the first n device elements using
// thrust::max_element; dereferencing the device_ptr copies the value
// back to the host.
float get_max_elem(thrust::device_ptr<float> p_arr, int n){
    comparator comp;
    thrust::device_ptr<float> res = thrust::max_element(p_arr, p_arr + n, comp);
    return (*res);
}
// Returns the smallest of the first n device elements using
// thrust::min_element; dereferencing the device_ptr copies the value
// back to the host.
float get_min_elem(thrust::device_ptr<float> p_arr, int n){
    comparator comp;
    thrust::device_ptr<float> res = thrust::min_element(p_arr, p_arr + n, comp);
    return (*res);
}
// Number of histogram splits used to partition n elements: one split
// per SPLIT_SIZE elements (ceil-style), plus one extra boundary slot.
int get_split_count(int n){
    int full_chunks = (n - 1) / SPLIT_SIZE;
    return full_chunks + 2;
}
// Maps elem linearly from [min_elem, max_elem] to a split index in
// [0, split_count - 1]; a degenerate range collapses to index 0.
// (n is accepted for signature compatibility but unused.)
int get_split_index(int n, float elem, float min_elem, float max_elem, int split_count){
    if (max_elem == min_elem)
        return 0;
    float normalized = (elem - min_elem) / (max_elem - min_elem);
    return (int)(normalized * (split_count - 1));
}
// Rounds n up to the next power of two (powers of two greater than one
// are returned unchanged; n == 1 yields 2).  Used to size scan buffers.
int get_extend_arr_size(int n){
    bool is_power_o_two = n && !(n & (n - 1));
    if (is_power_o_two && n > 1){
        return n;
    }
    // Count the bits n occupies, then use an integer shift instead of
    // floating-point pow() — same results, no double rounding involved.
    int nearest_power = 0;
    while(n > 0){
        n >>= 1;
        nearest_power++;
    }
    return 1 << nearest_power;
}
// Scatters `input` into a freshly malloc'd array so that elements with
// the same split index become contiguous.  `scan` holds each bucket's
// start offset and `split_indexes[i]` is the bucket of input[i].
// Caller owns (and must free) the returned buffer.
float * to_small_bucket(float * input, int * scan, int * split_indexes, int n, int bucket_count){
    // Per-bucket fill counters, zero-initialised.
    int* buckets = (int*)malloc(sizeof(int)*bucket_count);
    memset(buckets, 0, sizeof(int)*bucket_count);
    float * result = (float *)malloc(sizeof(float)*n);
    for(int i = 0; i < n; i++){
        result[scan[split_indexes[i]] + buckets[split_indexes[i]]] = input[i];
        buckets[split_indexes[i]] += 1;
    }
    free(buckets);  // fix: the scratch counter array was previously leaked
    return result;
}
// Derives coarse "big bucket" boundaries from the scanned histogram.
// Walks scan[1..split_count] and emits a boundary roughly every
// POCKET_SIZE accumulated elements; the final boundary is forced so the
// offsets cover all n elements.  Returns a malloc'd offset array
// (caller frees result.buckets).
bucket get_big_bucket(int * scan, int split_count, int n){
    bucket result;
    vector <int> indexes;
    indexes.push_back(0);
    int prev = 0;
    for (int i = 1; i < (split_count + 1); ++i){
        int index_n = scan[i];
        int diff = index_n - indexes.back();
        // Overshot the pocket size: close the bucket at the previous
        // scan position rather than the current one.
        if ((diff > POCKET_SIZE && prev != 0)){
            indexes.push_back(prev);
        }
        else if (diff == POCKET_SIZE){
            indexes.push_back(index_n);
        }
        // Make sure the last boundary reaches the end of the array.
        if (i == split_count && indexes.back() != n){
            indexes.push_back(index_n);
        }
        prev = index_n;
    }
    int pockets_index_size = indexes.size();
    int* pockets_index = (int*)malloc(sizeof(int)*pockets_index_size);
    memcpy(pockets_index, indexes.data(), sizeof(int)*pockets_index_size);
    result.buckets_count = pockets_index_size;
    result.buckets = pockets_index;
    return result;
}
// Histograms the split indexes: one atomic increment per input element,
// with a grid-stride loop so any launch configuration covers all n.
__global__ void histogram_kernel(int * split_indexes, int * histogram, int n){
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    int offset = blockDim.x * gridDim.x;
    while (idx < n) {
        atomicAdd(histogram + split_indexes[idx], 1);
        idx += offset;
    }
}
// Odd-even transposition sort of each bucket in shared memory.
// Block b handles buckets b, b+gridDim.x, ...; buckets larger than
// BLOCK_SIZE are skipped here (main_sort recurses on them on the host).
// Expects blockDim.x == BLOCK_SIZE so every element has a thread.
__global__ void odd_even_sort(float * arr, int * buckets, int count_buckets, int n){
    int idx = threadIdx.x;
    int block_id = blockIdx.x;
    int count_elem;
    __shared__ float data[BLOCK_SIZE];
    for(int i = block_id; i < count_buckets - 1; i += gridDim.x){
        count_elem = buckets[i + 1] - buckets[i];
        // Skip oversized buckets.  This `continue` bypasses the barriers
        // below, which is safe only because count_elem is uniform across
        // the block.
        if (count_elem > BLOCK_SIZE){
            continue;
        }
        // Stage this bucket into shared memory.
        if(idx < count_elem){
            data[idx] = arr[buckets[i] + idx];
        }
        __syncthreads();
        // ceil(count_elem / 2) phase pairs fully sort the bucket.
        int iter_count;
        if (count_elem % 2 == 0)
            iter_count = count_elem / 2;
        else
            iter_count = count_elem / 2 + 1;
        for(int j = 0; j < iter_count; j++){
            // Even phase: compare-exchange pairs (0,1), (2,3), ...
            if((idx % 2 == 0) && (idx < count_elem - 1)){
                if(data[idx] > data[idx + 1]){
                    float tmp = data[idx];
                    data[idx] = data[idx + 1];
                    data[idx + 1] = tmp;
                }
            }
            __syncthreads();
            // Odd phase: compare-exchange pairs (1,2), (3,4), ...
            if((idx % 2 != 0) && (idx < count_elem - 1)){
                if(data[idx] > data[idx + 1]){
                    float tmp = data[idx];
                    data[idx] = data[idx + 1];
                    data[idx + 1] = tmp;
                }
            }
            __syncthreads();
        }
        // Write the sorted bucket back to global memory.
        if(idx < count_elem)
            arr[buckets[i] + idx] = data[idx];
    }
}
// Sorts `array` in place: copies data and bucket boundaries to the GPU,
// runs odd_even_sort (one bucket per block), and copies the result back.
void bucket_sort(float * array, bucket buckets, int n){
    float * dev_arr;
    int * gpu_buckets;
    cudaMalloc(&gpu_buckets, sizeof(int) * buckets.buckets_count);
    cudaMalloc(&dev_arr, sizeof(float) * n);
    cudaMemcpy(gpu_buckets, buckets.buckets, sizeof(int) * buckets.buckets_count, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_arr, array, sizeof(float) * n, cudaMemcpyHostToDevice);
    odd_even_sort<<<1024, BLOCK_SIZE>>>(dev_arr, gpu_buckets, buckets.buckets_count, n);
    // fix: the original created an event but never recorded it, so its
    // cudaEventSynchronize was a no-op (and the event leaked).  Wait for
    // the kernel explicitly and surface any launch/execution error.
    cudaDeviceSynchronize();
    cuda_check_error();
    cudaMemcpy(array, dev_arr, sizeof(float) * n, cudaMemcpyDeviceToHost);
    // fix: device buffers were previously leaked on every call.
    cudaFree(dev_arr);
    cudaFree(gpu_buckets);
}
// Blelloch-style exclusive scan of one BLOCK_SIZE-element tile per
// block.  Each thread loads/stores two elements, so launch with
// BLOCK_SIZE/2 threads per block.  Unless is_summ, the tile's total is
// written to summ[blockIdx.x] so callers can add tile offsets later.
__global__ void scan_blocks(int * histogram, int * res, int * summ, bool is_summ){
    int global_idx = blockDim.x * blockIdx.x + threadIdx.x;
    int idx = threadIdx.x;
    int offset = 1;
    __shared__ int data[BLOCK_SIZE];
    // Stage two inputs per thread into shared memory.
    data[2 * idx] = histogram[2 * global_idx];
    data[2 * idx + 1] = histogram[2 * global_idx + 1];
    // Up-sweep: build a sum tree in place.
    for(int i = BLOCK_SIZE / 2; i > 0; i /= 2){
        __syncthreads();
        if(idx < i){
            int left = offset * (2 * idx + 1) - 1;
            int right = offset * (2 * idx + 2) - 1;
            data[right] += data[left];
        }
        offset *= 2;
    }
    __syncthreads();
    if(idx == 0){
        // Publish the tile total, then clear the root for the down-sweep.
        if(!is_summ){
            summ[blockIdx.x] = data[BLOCK_SIZE - 1];
        }
        data[BLOCK_SIZE - 1] = 0;
    }
    // Down-sweep: convert the sum tree into an exclusive scan.
    for(int i = 1; i < BLOCK_SIZE ; i *= 2){
        offset /= 2;
        __syncthreads();
        if (idx < i){
            int left = offset * (2 * idx + 1) - 1;
            int right = offset * (2 * idx + 2) - 1;
            int tmp = data[left];
            data[left] = data[right];
            data[right] += tmp;
        }
    }
    __syncthreads();
    res[2 * global_idx] = data[2 * idx];
    res[2 * global_idx + 1] = data[2 * idx + 1];
}
// Adds each tile's scanned total onto that tile's elements, turning
// tile-local exclusive scans into a global exclusive scan.
__global__ void add_summ(int * blocks, int * summ, int block_count){
    for(int i = blockIdx.x; i < block_count; i += gridDim.x){
        blocks[blockDim.x * i + threadIdx.x] += summ[i];
    }
}
// Sums the first n entries of `summ`; used to recover the grand total
// produced by the block-wise scan.
int get_last_summ(int * summ, int n){
    int total = 0;
    for(int * p = summ; p != summ + n; ++p){
        total += *p;
    }
    return total;
}
// Exclusive scan of n device ints.  Returns a malloc'd host array of
// n + 1 entries: the scan followed by the grand total in res[n].
// Caller frees the result.  Recurses when the per-tile totals are
// themselves too many for a single scan_blocks launch.
int * recursive_scan(int * dev_arr, int n, int is_summ){
    int * res = (int *)malloc(sizeof(int) * (n + 1));
    int * dev_blocks;
    int * dev_summ;
    int * summ;
    int * dev_scanned_summ;
    // One BLOCK_SIZE-element tile per block, at least one block.
    int block_count = (n >= BLOCK_SIZE) ? (n / BLOCK_SIZE) : 1;
    int threads_count = BLOCK_SIZE / 2;  // each thread handles two elements
    summ = (int *)malloc(sizeof(int) * (block_count + 1));
    cudaMalloc(&dev_blocks, sizeof(int) * n);
    cudaMalloc(&dev_summ, sizeof(int) * block_count);
    cudaMalloc(&dev_scanned_summ, sizeof(int) * block_count);
    scan_blocks<<<block_count, threads_count>>>(dev_arr, dev_blocks, dev_summ, false);
    cudaMemcpy(summ, dev_summ, sizeof(int) * block_count, cudaMemcpyDeviceToHost);
    if(block_count > BLOCK_SIZE){
        // Too many tiles: scan the tile totals recursively.
        int * scan_summ = recursive_scan(dev_summ, block_count, false);
        cudaMemcpy(dev_summ, scan_summ, sizeof(int) * block_count, cudaMemcpyHostToDevice);
        add_summ<<<16, BLOCK_SIZE>>>(dev_blocks, dev_summ, block_count);
        free(scan_summ);  // fix: the recursive result was previously leaked
    }
    else{
        scan_blocks<<<1, threads_count>>>(dev_summ, dev_scanned_summ, NULL, true);
        add_summ<<<16, BLOCK_SIZE>>>(dev_blocks, dev_scanned_summ, block_count);
    }
    cudaMemcpy(res, dev_blocks, sizeof(int) * n, cudaMemcpyDeviceToHost);
    res[n] = get_last_summ(summ, block_count);
    // fix: scratch host/device buffers were previously leaked every call.
    free(summ);
    cudaFree(dev_blocks);
    cudaFree(dev_summ);
    cudaFree(dev_scanned_summ);
    return res;
}
// Hybrid bucket sort of input_array, in place: histogram the elements
// into splits on the GPU, scan the histogram, scatter into buckets,
// sort buckets with odd_even_sort, and recurse on any bucket that is
// still larger than POCKET_SIZE.
void main_sort(float * input_array, int n){
    int split_count, extended_size;
    float min_elem, max_elem;
    split_count = get_split_count(n);
    extended_size = get_extend_arr_size(split_count + 1);
    int * split_indexes = (int *)malloc(sizeof(int) * n);
    int * histogram = (int *)malloc(sizeof(int) * extended_size);
    int * gpu_split_indexes;
    int * gpu_histogram;
    int * gpu_scan;
    float * gpu_arr;
    cudaMalloc(&gpu_arr, sizeof(float) * n);
    cudaMalloc(&gpu_split_indexes, sizeof(int) * n);
    cudaMalloc(&gpu_histogram, sizeof(int) * extended_size);
    cudaMalloc(&gpu_scan, sizeof(int) * extended_size);
    cudaMemset(gpu_histogram, 0, sizeof(int) * extended_size);
    cudaMemset(gpu_scan, 0, sizeof(int) * extended_size);
    cudaMemcpy(gpu_arr, input_array, sizeof(float) * n, cudaMemcpyHostToDevice);
    thrust::device_ptr<float> p_arr = thrust::device_pointer_cast(gpu_arr);
    min_elem = get_min_elem(p_arr, n);
    max_elem = get_max_elem(p_arr, n);
    if(min_elem == max_elem){
        // All elements equal: nothing to sort.  fix: release the buffers
        // that were previously leaked on this early return.
        free(split_indexes);
        free(histogram);
        cudaFree(gpu_split_indexes);
        cudaFree(gpu_histogram);
        cudaFree(gpu_scan);
        cudaFree(gpu_arr);
        return;
    }
    for(int i = 0; i < n; i++){
        split_indexes[i] = get_split_index(n, input_array[i], min_elem, max_elem, split_count);
    }
    cudaMemcpy(gpu_split_indexes, split_indexes, sizeof(int) * n, cudaMemcpyHostToDevice);
    histogram_kernel<<<64, BLOCK_SIZE>>>(gpu_split_indexes, gpu_histogram, n);
    cudaMemcpy(histogram, gpu_histogram, sizeof(int) * extended_size, cudaMemcpyDeviceToHost);
    int * cpu_scan = recursive_scan(gpu_histogram, extended_size, false);
    float * to_small_buckets = to_small_bucket(input_array, cpu_scan, split_indexes, n, split_count);
    bucket big_buckets = get_big_bucket(cpu_scan, split_count, n);
    bucket_sort(to_small_buckets, big_buckets, n);
    // Buckets larger than POCKET_SIZE were skipped by the GPU kernel;
    // recurse on them until they fit.
    for(int i = 1; i < big_buckets.buckets_count; i++){
        int size_pocket = big_buckets.buckets[i] - big_buckets.buckets[i - 1];
        if (size_pocket > POCKET_SIZE){
            int ind = big_buckets.buckets[i - 1];
            main_sort(to_small_buckets + ind, size_pocket);
        }
    }
    memcpy(input_array, to_small_buckets, sizeof(float) * n);
    free(to_small_buckets);
    free(cpu_scan);
    free(split_indexes);
    free(histogram);
    free(big_buckets.buckets);  // fix: boundary array was previously leaked
    cudaFree(gpu_split_indexes);
    cudaFree(gpu_histogram);
    cudaFree(gpu_scan);
    cudaFree(gpu_arr);
}
// Reads a binary int count followed by that many floats from stdin,
// sorts them, and writes the sorted floats to stdout.
int main() {
    int n;
    // fix: validate the header read and reject non-positive sizes before
    // allocating (the original malloc'd first and leaked when n == 0).
    if (fread(&n, sizeof(int), 1, stdin) != 1 || n <= 0){
        return 0;
    }
    float * input_array = (float *)malloc(sizeof(float) * n);
    if (!input_array){
        return 1;
    }
    if (fread(input_array, sizeof(float), n, stdin) != (size_t)n){
        free(input_array);
        return 1;
    }
    main_sort(input_array, n);
    fwrite(input_array, sizeof(float), n, stdout);
    free(input_array);
    return 0;
}
#include <iostream>
#include <math.h>
#include <vector>
#include <thrust/extrema.h>
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
#include <thrust/device_vector.h>
#define BLOCK_SIZE 1024
using namespace std;
const int SPLIT_SIZE = 512;
const int POCKET_SIZE = 1024;
// Aborts the process if a HIP runtime error is pending (e.g. from a
// failed kernel launch).  Call right after kernel launches.
#define cuda_check_error() { \
 hipError_t e=hipGetLastError(); \
 if(e!=hipSuccess) { \
   printf("Cuda failure %s\n", hipGetErrorString(e)); \
   exit(0); \
 } \
}
// Partition of an array into contiguous buckets: `buckets` holds the
// malloc'd start offsets (last entry = end), `buckets_count` their count.
struct bucket
{
    int * buckets;
    int buckets_count;
};
// Host/device float ordering functor for thrust::min/max_element below.
struct comparator {
    __host__ __device__ bool operator()(float a, float b) {
        return a < b;
    }
};
// Largest of the first n device elements (thrust::max_element); the
// dereference copies the value back to the host.
float get_max_elem(thrust::device_ptr<float> p_arr, int n){
    comparator comp;
    thrust::device_ptr<float> res = thrust::max_element(p_arr, p_arr + n, comp);
    return (*res);
}
// Smallest of the first n device elements (thrust::min_element); the
// dereference copies the value back to the host.
float get_min_elem(thrust::device_ptr<float> p_arr, int n){
    comparator comp;
    thrust::device_ptr<float> res = thrust::min_element(p_arr, p_arr + n, comp);
    return (*res);
}
// How many splits partition n elements: SPLIT_SIZE elements apiece
// (ceil-style), plus one extra boundary slot.
int get_split_count(int n){
    return 2 + (n - 1) / SPLIT_SIZE;
}
// Linear map of elem from [min_elem, max_elem] onto [0, split_count-1];
// a zero-width range yields index 0.  (n is unused, kept for signature
// compatibility.)
int get_split_index(int n, float elem, float min_elem, float max_elem, int split_count){
    float range = max_elem - min_elem;
    if (range == 0.0f)
        return 0;
    return (int)((elem - min_elem) / range * (split_count - 1));
}
// Next power of two at or above n (minimum 2); powers of two > 1 pass
// through unchanged.  Sizes the scan buffers.
int get_extend_arr_size(int n){
    bool is_power_o_two = n && !(n & (n - 1));
    if (is_power_o_two && n > 1){
        return n;
    }
    // Bit-length of n, then an integer shift — replaces the original
    // floating-point pow(2, k) with an exact integer computation.
    int nearest_power = 0;
    while(n > 0){
        n >>= 1;
        nearest_power++;
    }
    return 1 << nearest_power;
}
// Scatters `input` into a new malloc'd array so elements sharing a
// split index become contiguous; `scan` gives each bucket's start
// offset.  Caller frees the returned buffer.
float * to_small_bucket(float * input, int * scan, int * split_indexes, int n, int bucket_count){
    // Per-bucket fill counters, zero-initialised.
    int* buckets = (int*)malloc(sizeof(int)*bucket_count);
    memset(buckets, 0, sizeof(int)*bucket_count);
    float * result = (float *)malloc(sizeof(float)*n);
    for(int i = 0; i < n; i++){
        result[scan[split_indexes[i]] + buckets[split_indexes[i]]] = input[i];
        buckets[split_indexes[i]] += 1;
    }
    free(buckets);  // fix: scratch counters were previously leaked
    return result;
}
// Builds coarse "big bucket" boundaries from the scanned histogram:
// emits a boundary roughly every POCKET_SIZE accumulated elements and
// forces the last boundary to cover all n elements.  Returns a malloc'd
// offset array (caller frees result.buckets).
bucket get_big_bucket(int * scan, int split_count, int n){
    bucket result;
    vector <int> indexes;
    indexes.push_back(0);
    int prev = 0;
    for (int i = 1; i < (split_count + 1); ++i){
        int index_n = scan[i];
        int diff = index_n - indexes.back();
        // Overshot: close the bucket at the previous scan position.
        if ((diff > POCKET_SIZE && prev != 0)){
            indexes.push_back(prev);
        }
        else if (diff == POCKET_SIZE){
            indexes.push_back(index_n);
        }
        // Ensure the boundaries reach the end of the array.
        if (i == split_count && indexes.back() != n){
            indexes.push_back(index_n);
        }
        prev = index_n;
    }
    int pockets_index_size = indexes.size();
    int* pockets_index = (int*)malloc(sizeof(int)*pockets_index_size);
    memcpy(pockets_index, indexes.data(), sizeof(int)*pockets_index_size);
    result.buckets_count = pockets_index_size;
    result.buckets = pockets_index;
    return result;
}
// Histograms the split indexes with one atomic increment per element;
// the grid-stride loop makes any launch size cover all n.
__global__ void histogram_kernel(int * split_indexes, int * histogram, int n){
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    int offset = blockDim.x * gridDim.x;
    while (idx < n) {
        atomicAdd(histogram + split_indexes[idx], 1);
        idx += offset;
    }
}
// Odd-even transposition sort of each bucket in shared memory; block b
// handles buckets b, b+gridDim.x, ... and skips buckets larger than
// BLOCK_SIZE (main_sort recurses on those).  Expects
// blockDim.x == BLOCK_SIZE.
__global__ void odd_even_sort(float * arr, int * buckets, int count_buckets, int n){
    int idx = threadIdx.x;
    int block_id = blockIdx.x;
    int count_elem;
    __shared__ float data[BLOCK_SIZE];
    for(int i = block_id; i < count_buckets - 1; i += gridDim.x){
        count_elem = buckets[i + 1] - buckets[i];
        // Uniform per block, so skipping the barriers below is safe.
        if (count_elem > BLOCK_SIZE){
            continue;
        }
        // Stage the bucket into shared memory.
        if(idx < count_elem){
            data[idx] = arr[buckets[i] + idx];
        }
        __syncthreads();
        // ceil(count_elem / 2) phase pairs fully sort the bucket.
        int iter_count;
        if (count_elem % 2 == 0)
            iter_count = count_elem / 2;
        else
            iter_count = count_elem / 2 + 1;
        for(int j = 0; j < iter_count; j++){
            // Even phase: pairs (0,1), (2,3), ...
            if((idx % 2 == 0) && (idx < count_elem - 1)){
                if(data[idx] > data[idx + 1]){
                    float tmp = data[idx];
                    data[idx] = data[idx + 1];
                    data[idx + 1] = tmp;
                }
            }
            __syncthreads();
            // Odd phase: pairs (1,2), (3,4), ...
            if((idx % 2 != 0) && (idx < count_elem - 1)){
                if(data[idx] > data[idx + 1]){
                    float tmp = data[idx];
                    data[idx] = data[idx + 1];
                    data[idx + 1] = tmp;
                }
            }
            __syncthreads();
        }
        // Write the sorted bucket back.
        if(idx < count_elem)
            arr[buckets[i] + idx] = data[idx];
    }
}
void bucket_sort(float * array, bucket buckets, int n){
float * dev_arr;
int * gpu_buckets;
hipMalloc(&gpu_buckets, sizeof(int) * buckets.buckets_count);
hipMalloc(&dev_arr, sizeof(float) * n);
hipMemcpy(gpu_buckets, buckets.buckets, sizeof(int) * buckets.buckets_count, hipMemcpyHostToDevice);
hipMemcpy(dev_arr, array, sizeof(float) * n, hipMemcpyHostToDevice);
hipEvent_t event;
hipEventCreate(&event);
odd_even_sort<<<1024, BLOCK_SIZE>>>(dev_arr, gpu_buckets, buckets.buckets_count, n);
hipEventSynchronize(event);
hipMemcpy(array, dev_arr, sizeof(float) * n, hipMemcpyDeviceToHost);
}
// Blelloch-style exclusive scan of one BLOCK_SIZE tile per block; each
// thread loads/stores two elements (launch with BLOCK_SIZE/2 threads).
// Unless is_summ, the tile total is exported to summ[blockIdx.x].
__global__ void scan_blocks(int * histogram, int * res, int * summ, bool is_summ){
    int global_idx = blockDim.x * blockIdx.x + threadIdx.x;
    int idx = threadIdx.x;
    int offset = 1;
    __shared__ int data[BLOCK_SIZE];
    // Stage two inputs per thread.
    data[2 * idx] = histogram[2 * global_idx];
    data[2 * idx + 1] = histogram[2 * global_idx + 1];
    // Up-sweep: build a sum tree in place.
    for(int i = BLOCK_SIZE / 2; i > 0; i /= 2){
        __syncthreads();
        if(idx < i){
            int left = offset * (2 * idx + 1) - 1;
            int right = offset * (2 * idx + 2) - 1;
            data[right] += data[left];
        }
        offset *= 2;
    }
    __syncthreads();
    if(idx == 0){
        // Publish the tile total, then clear the root for the down-sweep.
        if(!is_summ){
            summ[blockIdx.x] = data[BLOCK_SIZE - 1];
        }
        data[BLOCK_SIZE - 1] = 0;
    }
    // Down-sweep: turn the sum tree into an exclusive scan.
    for(int i = 1; i < BLOCK_SIZE ; i *= 2){
        offset /= 2;
        __syncthreads();
        if (idx < i){
            int left = offset * (2 * idx + 1) - 1;
            int right = offset * (2 * idx + 2) - 1;
            int tmp = data[left];
            data[left] = data[right];
            data[right] += tmp;
        }
    }
    __syncthreads();
    res[2 * global_idx] = data[2 * idx];
    res[2 * global_idx + 1] = data[2 * idx + 1];
}
// Adds each tile's scanned total onto the tile's elements, making the
// tile-local scans globally exclusive.
__global__ void add_summ(int * blocks, int * summ, int block_count){
    for(int i = blockIdx.x; i < block_count; i += gridDim.x){
        blocks[blockDim.x * i + threadIdx.x] += summ[i];
    }
}
// Total of the first n entries of `summ` (the scan's grand total).
int get_last_summ(int * summ, int n){
    int acc = 0;
    int i = 0;
    while (i < n) {
        acc += summ[i];
        ++i;
    }
    return acc;
}
int * recursive_scan(int * dev_arr, int n, int is_summ){
int * res = (int *)malloc(sizeof(int) * (n + 1));
int * dev_blocks;
int * dev_summ;
int * summ;
int * dev_scanned_summ;
int block_count = n / BLOCK_SIZE;
if (n >= BLOCK_SIZE){
block_count = n / BLOCK_SIZE;
}
else{
block_count = 1;
}
int threads_count = BLOCK_SIZE / 2;
summ = (int *)malloc(sizeof(int) * (block_count + 1));
hipMalloc(&dev_blocks, sizeof(int) * n);
hipMalloc(&dev_summ, sizeof(int) * block_count);
hipMalloc(&dev_scanned_summ, sizeof(int) * block_count);
scan_blocks<<<block_count, threads_count>>>(dev_arr, dev_blocks, dev_summ, false);
hipMemcpy(summ, dev_summ, sizeof(int) * block_count, hipMemcpyDeviceToHost);
if(block_count > BLOCK_SIZE){
int * scan_summ = recursive_scan(dev_summ, block_count, false);
hipMemcpy(dev_summ, scan_summ, sizeof(int) * block_count, hipMemcpyHostToDevice);
add_summ<<<16, BLOCK_SIZE>>>(dev_blocks, dev_summ, block_count);
}
else{
scan_blocks<<<1, threads_count>>>(dev_summ, dev_scanned_summ, NULL, true);
add_summ<<<16, BLOCK_SIZE>>>(dev_blocks, dev_scanned_summ, block_count);
}
hipMemcpy(res, dev_blocks, sizeof(int) * n, hipMemcpyDeviceToHost);
res[n] = get_last_summ(summ, block_count);
return res;
}
// Hybrid bucket sort of input_array, in place: GPU histogram of split
// indexes, scan, scatter into buckets, GPU odd-even sort, then recurse
// on any bucket still larger than POCKET_SIZE.
void main_sort(float * input_array, int n){
    int split_count, extended_size;
    float min_elem, max_elem;
    split_count = get_split_count(n);
    extended_size = get_extend_arr_size(split_count + 1);
    int * split_indexes = (int *)malloc(sizeof(int) * n);
    int * histogram = (int *)malloc(sizeof(int) * extended_size);
    int * gpu_split_indexes;
    int * gpu_histogram;
    int * gpu_scan;
    float * gpu_arr;
    hipMalloc(&gpu_arr, sizeof(float) * n);
    hipMalloc(&gpu_split_indexes, sizeof(int) * n);
    hipMalloc(&gpu_histogram, sizeof(int) * extended_size);
    hipMalloc(&gpu_scan, sizeof(int) * extended_size);
    hipMemset(gpu_histogram, 0, sizeof(int) * extended_size);
    hipMemset(gpu_scan, 0, sizeof(int) * extended_size);
    hipMemcpy(gpu_arr, input_array, sizeof(float) * n, hipMemcpyHostToDevice);
    thrust::device_ptr<float> p_arr = thrust::device_pointer_cast(gpu_arr);
    min_elem = get_min_elem(p_arr, n);
    max_elem = get_max_elem(p_arr, n);
    if(min_elem == max_elem){
        // All elements equal: nothing to sort.  fix: release the buffers
        // that were previously leaked on this early return.
        free(split_indexes);
        free(histogram);
        hipFree(gpu_split_indexes);
        hipFree(gpu_histogram);
        hipFree(gpu_scan);
        hipFree(gpu_arr);
        return;
    }
    for(int i = 0; i < n; i++){
        split_indexes[i] = get_split_index(n, input_array[i], min_elem, max_elem, split_count);
    }
    hipMemcpy(gpu_split_indexes, split_indexes, sizeof(int) * n, hipMemcpyHostToDevice);
    histogram_kernel<<<64, BLOCK_SIZE>>>(gpu_split_indexes, gpu_histogram, n);
    hipMemcpy(histogram, gpu_histogram, sizeof(int) * extended_size, hipMemcpyDeviceToHost);
    int * cpu_scan = recursive_scan(gpu_histogram, extended_size, false);
    float * to_small_buckets = to_small_bucket(input_array, cpu_scan, split_indexes, n, split_count);
    bucket big_buckets = get_big_bucket(cpu_scan, split_count, n);
    bucket_sort(to_small_buckets, big_buckets, n);
    // Buckets larger than POCKET_SIZE were skipped on the GPU; recurse.
    for(int i = 1; i < big_buckets.buckets_count; i++){
        int size_pocket = big_buckets.buckets[i] - big_buckets.buckets[i - 1];
        if (size_pocket > POCKET_SIZE){
            int ind = big_buckets.buckets[i - 1];
            main_sort(to_small_buckets + ind, size_pocket);
        }
    }
    memcpy(input_array, to_small_buckets, sizeof(float) * n);
    free(to_small_buckets);
    free(cpu_scan);
    free(split_indexes);
    free(histogram);
    free(big_buckets.buckets);  // fix: boundary array was previously leaked
    hipFree(gpu_split_indexes);
    hipFree(gpu_histogram);
    hipFree(gpu_scan);
    hipFree(gpu_arr);
}
// Reads a binary int count followed by that many floats from stdin,
// sorts them, and writes the sorted floats to stdout.
int main() {
    int n;
    // fix: validate the header read and reject non-positive sizes before
    // allocating (the original malloc'd first and leaked when n == 0).
    if (fread(&n, sizeof(int), 1, stdin) != 1 || n <= 0){
        return 0;
    }
    float * input_array = (float *)malloc(sizeof(float) * n);
    if (!input_array){
        return 1;
    }
    if (fread(input_array, sizeof(float), n, stdin) != (size_t)n){
        free(input_array);
        return 1;
    }
    main_sort(input_array, n);
    fwrite(input_array, sizeof(float), n, stdout);
    free(input_array);
    return 0;
}
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <vector>
#define N 10
// Element-wise sum with one single-thread block per element: block i
// writes c[i] = a[i] + b[i].  Launched as add<<<N, 1>>> from main.
__global__ void add(float *a, float *b, float *c) {
    c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}
// Terminates the program with a diagnostic when a CUDA runtime call
// did not succeed; no-op on cudaSuccess.
void cuda_check(cudaError_t status) {
    if (status == cudaSuccess) {
        return;
    }
    std::cout << "Error could not allocate memory result " << status << std::endl;
    exit(1);
}
// Adds two N-element float vectors on the GPU using pinned host buffers
// and prints the result.
int main(void) {
    std::vector<float> vec_a;
    std::vector<float> vec_b;
    float *a, *b, *c;
    float *d_a, *d_b, *d_c;
    // fix: the buffers hold floats, so size them with sizeof(float)
    // (the original used sizeof(int), which only worked by coincidence).
    int size = N * sizeof(float);
    cuda_check(cudaMalloc((void**)&d_a, size));
    cuda_check(cudaMalloc((void**)&d_b, size));
    cuda_check(cudaMalloc((void**)&d_c, size));
    // Pinned (page-locked) host memory, portable across contexts.
    cuda_check(cudaHostAlloc((void **)&a, size, cudaHostAllocPortable));
    cuda_check(cudaHostAlloc((void **)&b, size, cudaHostAllocPortable));
    cuda_check(cudaHostAlloc((void **)&c, size, cudaHostAllocPortable));
    for(int i = 0; i < N; ++i) {
        vec_a.push_back(i);
        vec_b.push_back(i);
    }
    memcpy(a, vec_a.data(), size);
    memcpy(b, vec_b.data(), size);
    cuda_check(cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice));
    cuda_check(cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice));
    add<<<N, 1>>>(d_a, d_b, d_c);
    cuda_check(cudaGetLastError());  // fix: launch errors went unchecked
    // This blocking copy also waits for the kernel to finish.
    cuda_check(cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost));
    for(int i = 0; i < N; ++i) {
        std::cout << c[i] << std::endl;
    }
    cudaFreeHost(a); cudaFreeHost(b); cudaFreeHost(c);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return 0;
}
Function : _Z3addPfS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE.U32 R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0007 */
/*0050*/ IMAD.WIDE.U32 R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0007 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE.U32 R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0007 */
/*0090*/ FADD R9, R2, R5 ; /* 0x0000000502097221 */
/* 0x004fca0000000000 */
/*00a0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <vector>
#define N 10
__global__ void add(float *a, float *b, float *c) {
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}
void cuda_check(cudaError_t status) {
if (status != cudaSuccess) {
std::cout << "Error could not allocate memory result " << status << std::endl;
exit(1);
}
}
int main(void) {
std::vector<float> vec_a;
std::vector<float> vec_b;
std::vector<float> vec_c;
float *a, *b, *c;
float *d_a, *d_b, *d_c;
int size = N * sizeof(int);
cudaMalloc((void**)&d_a, size);
cudaMalloc((void**)&d_b, size);
cudaMalloc((void**)&d_c, size);
cuda_check(cudaHostAlloc((void **)&a, size, cudaHostAllocPortable));
cuda_check(cudaHostAlloc((void **)&b, size, cudaHostAllocPortable));
cuda_check(cudaHostAlloc((void **)&c, size, cudaHostAllocPortable));
// a = new int[N];
// b = new int[N];
// c = new int[N];
for(int i = 0; i < N; ++i) {
vec_a.push_back(i);
vec_b.push_back(i);
//a[i] = i;
//b[i] = i;
}
memcpy(a, &vec_a[0], size);
memcpy(b, &vec_b[0], size);
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
add<<<N, 1>>>(d_a, d_b, d_c);
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
for(int i = 0; i < N; ++i) {
std::cout << c[i] << std::endl;
}
//free(a); free(b); free(c);
cudaFreeHost(a); cudaFreeHost(b); cudaFreeHost(c);
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
return 0;
} | .file "tmpxft_00149f16_00000000-6_Main.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB4046:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4046:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "Error could not allocate memory result "
.text
.globl _Z10cuda_check9cudaError
.type _Z10cuda_check9cudaError, @function
_Z10cuda_check9cudaError:
.LFB4032:
.cfi_startproc
endbr64
testl %edi, %edi
jne .L8
ret
.L8:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movl %edi, %ebx
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %ebx, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $1, %edi
call exit@PLT
.cfi_endproc
.LFE4032:
.size _Z10cuda_check9cudaError, .-_Z10cuda_check9cudaError
.globl _Z26__device_stub__Z3addPfS_S_PfS_S_
.type _Z26__device_stub__Z3addPfS_S_PfS_S_, @function
_Z26__device_stub__Z3addPfS_S_PfS_S_:
.LFB4068:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L13
.L9:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L14
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L13:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addPfS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L9
.L14:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4068:
.size _Z26__device_stub__Z3addPfS_S_PfS_S_, .-_Z26__device_stub__Z3addPfS_S_PfS_S_
.globl _Z3addPfS_S_
.type _Z3addPfS_S_, @function
_Z3addPfS_S_:
.LFB4069:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z3addPfS_S_PfS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4069:
.size _Z3addPfS_S_, .-_Z3addPfS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "_Z3addPfS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB4071:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addPfS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4071:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .text._ZNSt6vectorIfSaIfEED2Ev,"axG",@progbits,_ZNSt6vectorIfSaIfEED5Ev,comdat
.align 2
.weak _ZNSt6vectorIfSaIfEED2Ev
.type _ZNSt6vectorIfSaIfEED2Ev, @function
_ZNSt6vectorIfSaIfEED2Ev:
.LFB4384:
.cfi_startproc
endbr64
movq (%rdi), %rax
testq %rax, %rax
je .L22
subq $8, %rsp
.cfi_def_cfa_offset 16
movq 16(%rdi), %rsi
subq %rax, %rsi
movq %rax, %rdi
call _ZdlPvm@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.L22:
ret
.cfi_endproc
.LFE4384:
.size _ZNSt6vectorIfSaIfEED2Ev, .-_ZNSt6vectorIfSaIfEED2Ev
.weak _ZNSt6vectorIfSaIfEED1Ev
.set _ZNSt6vectorIfSaIfEED1Ev,_ZNSt6vectorIfSaIfEED2Ev
.section .rodata._ZNSt6vectorIfSaIfEE17_M_realloc_insertIJfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_.str1.1,"aMS",@progbits,1
.LC2:
.string "vector::_M_realloc_insert"
.section .text._ZNSt6vectorIfSaIfEE17_M_realloc_insertIJfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_,"axG",@progbits,_ZNSt6vectorIfSaIfEE17_M_realloc_insertIJfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_,comdat
.align 2
.weak _ZNSt6vectorIfSaIfEE17_M_realloc_insertIJfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_
.type _ZNSt6vectorIfSaIfEE17_M_realloc_insertIJfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_, @function
_ZNSt6vectorIfSaIfEE17_M_realloc_insertIJfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_:
.LFB4645:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movq %rsi, (%rsp)
movq %rdx, 8(%rsp)
movq 8(%rdi), %rbp
movq (%rdi), %r13
movq %rbp, %rax
subq %r13, %rax
sarq $2, %rax
movabsq $2305843009213693951, %rdx
cmpq %rdx, %rax
je .L42
movq %rdi, %rbx
cmpq %r13, %rbp
movl $1, %edx
cmovne %rax, %rdx
addq %rdx, %rax
jc .L28
movabsq $2305843009213693951, %r14
cmpq %r14, %rax
cmovbe %rax, %r14
movq (%rsp), %r15
subq %r13, %r15
movl $0, %r12d
testq %rax, %rax
je .L29
jmp .L36
.L42:
leaq .LC2(%rip), %rdi
call _ZSt20__throw_length_errorPKc@PLT
.L43:
movq %r15, %rdx
movq %r13, %rsi
movq %r12, %rdi
call memmove@PLT
leaq 4(%r12,%r15), %r15
movq (%rsp), %rax
subq %rax, %rbp
testq %rbp, %rbp
jg .L31
addq %rbp, %r15
movq 16(%rbx), %rsi
subq %r13, %rsi
jmp .L35
.L28:
movq (%rsp), %r15
subq %r13, %r15
movabsq $2305843009213693951, %r14
.L36:
leaq 0(,%r14,4), %rdi
call _Znwm@PLT
movq %rax, %r12
.L29:
movq 8(%rsp), %rax
movss (%rax), %xmm0
movss %xmm0, (%r12,%r15)
testq %r15, %r15
jg .L43
leaq 4(%r12,%r15), %r15
movq (%rsp), %rax
subq %rax, %rbp
testq %rbp, %rbp
jle .L33
.L31:
movq %rbp, %rdx
movq (%rsp), %rsi
movq %r15, %rdi
call memcpy@PLT
.L33:
addq %rbp, %r15
testq %r13, %r13
je .L34
movq 16(%rbx), %rsi
subq %r13, %rsi
.L35:
movq %r13, %rdi
call _ZdlPvm@PLT
.L34:
movq %r12, (%rbx)
movq %r15, 8(%rbx)
leaq (%r12,%r14,4), %rax
movq %rax, 16(%rbx)
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4645:
.size _ZNSt6vectorIfSaIfEE17_M_realloc_insertIJfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_, .-_ZNSt6vectorIfSaIfEE17_M_realloc_insertIJfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_
.section .text._ZNSt6vectorIfSaIfEE12emplace_backIJfEEERfDpOT_,"axG",@progbits,_ZNSt6vectorIfSaIfEE12emplace_backIJfEEERfDpOT_,comdat
.align 2
.weak _ZNSt6vectorIfSaIfEE12emplace_backIJfEEERfDpOT_
.type _ZNSt6vectorIfSaIfEE12emplace_backIJfEEERfDpOT_, @function
_ZNSt6vectorIfSaIfEE12emplace_backIJfEEERfDpOT_:
.LFB4545:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movq %rdi, %rbx
movq 8(%rdi), %rax
cmpq 16(%rdi), %rax
je .L45
movss (%rsi), %xmm0
movss %xmm0, (%rax)
addq $4, 8(%rdi)
.L46:
movq 8(%rbx), %rax
subq $4, %rax
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L45:
.cfi_restore_state
movq %rsi, %rdx
movq %rax, %rsi
call _ZNSt6vectorIfSaIfEE17_M_realloc_insertIJfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_
jmp .L46
.cfi_endproc
.LFE4545:
.size _ZNSt6vectorIfSaIfEE12emplace_backIJfEEERfDpOT_, .-_ZNSt6vectorIfSaIfEE12emplace_backIJfEEERfDpOT_
.text
.globl main
.type main, @function
main:
.LFB4033:
.cfi_startproc
.cfi_personality 0x9b,DW.ref.__gxx_personality_v0
.cfi_lsda 0x1b,.LLSDA4033
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $184, %rsp
.cfi_def_cfa_offset 224
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
movq $0, 80(%rsp)
movq $0, 88(%rsp)
movq $0, 96(%rsp)
movq $0, 112(%rsp)
movq $0, 120(%rsp)
movq $0, 128(%rsp)
movq $0, 144(%rsp)
movq $0, 152(%rsp)
movq $0, 160(%rsp)
leaq 32(%rsp), %rdi
movl $40, %esi
.LEHB0:
call cudaMalloc@PLT
leaq 40(%rsp), %rdi
movl $40, %esi
call cudaMalloc@PLT
leaq 48(%rsp), %rdi
movl $40, %esi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movl $1, %edx
movl $40, %esi
call cudaHostAlloc@PLT
movl %eax, %edi
call _Z10cuda_check9cudaError
leaq 16(%rsp), %rdi
movl $1, %edx
movl $40, %esi
call cudaHostAlloc@PLT
movl %eax, %edi
call _Z10cuda_check9cudaError
leaq 24(%rsp), %rdi
movl $1, %edx
movl $40, %esi
call cudaHostAlloc@PLT
movl %eax, %edi
call _Z10cuda_check9cudaError
movl $0, %ebx
leaq 68(%rsp), %r12
jmp .L49
.L65:
movl %ebp, 68(%rsp)
leaq 112(%rsp), %rdi
movq %r12, %rsi
call _ZNSt6vectorIfSaIfEE12emplace_backIJfEEERfDpOT_
addl $1, %ebx
cmpl $10, %ebx
je .L64
.L49:
pxor %xmm1, %xmm1
cvtsi2ssl %ebx, %xmm1
movd %xmm1, %ebp
movss %xmm1, 68(%rsp)
leaq 80(%rsp), %rdi
movq %r12, %rsi
call _ZNSt6vectorIfSaIfEE12emplace_backIJfEEERfDpOT_
jmp .L65
.L64:
movq 8(%rsp), %rsi
movq 80(%rsp), %rax
movdqu (%rax), %xmm2
movups %xmm2, (%rsi)
movdqu 16(%rax), %xmm3
movups %xmm3, 16(%rsi)
movq 32(%rax), %rax
movq %rax, 32(%rsi)
movq 16(%rsp), %rax
movq 112(%rsp), %rdx
movdqu (%rdx), %xmm4
movups %xmm4, (%rax)
movdqu 16(%rdx), %xmm5
movups %xmm5, 16(%rax)
movq 32(%rdx), %rdx
movq %rdx, 32(%rax)
movl $1, %ecx
movl $40, %edx
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $40, %edx
movq 16(%rsp), %rsi
movq 40(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $10, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 68(%rsp), %rdx
movl $1, %ecx
movq 56(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L50
movq 48(%rsp), %rdx
movq 40(%rsp), %rsi
movq 32(%rsp), %rdi
call _Z26__device_stub__Z3addPfS_S_PfS_S_
.L50:
movl $2, %ecx
movl $40, %edx
movq 48(%rsp), %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $0, %ebp
leaq _ZSt4cout(%rip), %r12
jmp .L55
.L70:
movq %rax, %rbx
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %r13
testq %r13, %r13
je .L66
cmpb $0, 56(%r13)
je .L53
movzbl 67(%r13), %esi
.L54:
movsbl %sil, %esi
movq %rbx, %rdi
call _ZNSo3putEc@PLT
jmp .L67
.L66:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L68
call _ZSt16__throw_bad_castv@PLT
.L59:
endbr64
movq %rax, %rbx
leaq 144(%rsp), %rdi
call _ZNSt6vectorIfSaIfEED1Ev
leaq 112(%rsp), %rdi
call _ZNSt6vectorIfSaIfEED1Ev
leaq 80(%rsp), %rdi
call _ZNSt6vectorIfSaIfEED1Ev
movq 168(%rsp), %rax
subq %fs:40, %rax
je .L57
call __stack_chk_fail@PLT
.L68:
call __stack_chk_fail@PLT
.L53:
movq %r13, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq 0(%r13), %rax
movl $10, %esi
movq %r13, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L54
.L67:
movq %rax, %rdi
call _ZNSo5flushEv@PLT
addq $4, %rbp
cmpq $40, %rbp
je .L69
.L55:
movq 24(%rsp), %rax
pxor %xmm0, %xmm0
cvtss2sd (%rax,%rbp), %xmm0
movq %r12, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
jmp .L70
.L69:
movq 8(%rsp), %rdi
call cudaFreeHost@PLT
movq 16(%rsp), %rdi
call cudaFreeHost@PLT
movq 24(%rsp), %rdi
call cudaFreeHost@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
.LEHE0:
leaq 144(%rsp), %rdi
call _ZNSt6vectorIfSaIfEED1Ev
leaq 112(%rsp), %rdi
call _ZNSt6vectorIfSaIfEED1Ev
leaq 80(%rsp), %rdi
call _ZNSt6vectorIfSaIfEED1Ev
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L71
movl $0, %eax
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L57:
.cfi_restore_state
movq %rbx, %rdi
.LEHB1:
call _Unwind_Resume@PLT
.LEHE1:
.L71:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4033:
.globl __gxx_personality_v0
.section .gcc_except_table,"a",@progbits
.LLSDA4033:
.byte 0xff
.byte 0xff
.byte 0x1
.uleb128 .LLSDACSE4033-.LLSDACSB4033
.LLSDACSB4033:
.uleb128 .LEHB0-.LFB4033
.uleb128 .LEHE0-.LEHB0
.uleb128 .L59-.LFB4033
.uleb128 0
.uleb128 .LEHB1-.LFB4033
.uleb128 .LEHE1-.LEHB1
.uleb128 0
.uleb128 0
.LLSDACSE4033:
.text
.size main, .-main
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.hidden DW.ref.__gxx_personality_v0
.weak DW.ref.__gxx_personality_v0
.section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat
.align 8
.type DW.ref.__gxx_personality_v0, @object
.size DW.ref.__gxx_personality_v0, 8
DW.ref.__gxx_personality_v0:
.quad __gxx_personality_v0
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <vector>
#define N 10
__global__ void add(float *a, float *b, float *c) {
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}
void cuda_check(cudaError_t status) {
if (status != cudaSuccess) {
std::cout << "Error could not allocate memory result " << status << std::endl;
exit(1);
}
}
int main(void) {
std::vector<float> vec_a;
std::vector<float> vec_b;
std::vector<float> vec_c;
float *a, *b, *c;
float *d_a, *d_b, *d_c;
int size = N * sizeof(int);
cudaMalloc((void**)&d_a, size);
cudaMalloc((void**)&d_b, size);
cudaMalloc((void**)&d_c, size);
cuda_check(cudaHostAlloc((void **)&a, size, cudaHostAllocPortable));
cuda_check(cudaHostAlloc((void **)&b, size, cudaHostAllocPortable));
cuda_check(cudaHostAlloc((void **)&c, size, cudaHostAllocPortable));
// a = new int[N];
// b = new int[N];
// c = new int[N];
for(int i = 0; i < N; ++i) {
vec_a.push_back(i);
vec_b.push_back(i);
//a[i] = i;
//b[i] = i;
}
memcpy(a, &vec_a[0], size);
memcpy(b, &vec_b[0], size);
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
add<<<N, 1>>>(d_a, d_b, d_c);
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
for(int i = 0; i < N; ++i) {
std::cout << c[i] << std::endl;
}
//free(a); free(b); free(c);
cudaFreeHost(a); cudaFreeHost(b); cudaFreeHost(c);
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
return 0;
} | #include <hip/hip_runtime.h>
#include <iostream>
#include <vector>
#define N 10
__global__ void add(float *a, float *b, float *c) {
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}
void cuda_check(hipError_t status) {
if (status != hipSuccess) {
std::cout << "Error could not allocate memory result " << status << std::endl;
exit(1);
}
}
int main(void) {
std::vector<float> vec_a;
std::vector<float> vec_b;
std::vector<float> vec_c;
float *a, *b, *c;
float *d_a, *d_b, *d_c;
int size = N * sizeof(int);
hipMalloc((void**)&d_a, size);
hipMalloc((void**)&d_b, size);
hipMalloc((void**)&d_c, size);
cuda_check(hipHostAlloc((void **)&a, size, hipHostMallocPortable));
cuda_check(hipHostAlloc((void **)&b, size, hipHostMallocPortable));
cuda_check(hipHostAlloc((void **)&c, size, hipHostMallocPortable));
// a = new int[N];
// b = new int[N];
// c = new int[N];
for(int i = 0; i < N; ++i) {
vec_a.push_back(i);
vec_b.push_back(i);
//a[i] = i;
//b[i] = i;
}
memcpy(a, &vec_a[0], size);
memcpy(b, &vec_b[0], size);
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
add<<<N, 1>>>(d_a, d_b, d_c);
hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
for(int i = 0; i < N; ++i) {
std::cout << c[i] << std::endl;
}
//free(a); free(b); free(c);
hipHostFree(a); hipHostFree(b); hipHostFree(c);
hipFree(d_a); hipFree(d_b); hipFree(d_c);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <iostream>
#include <vector>
#define N 10
__global__ void add(float *a, float *b, float *c) {
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}
void cuda_check(hipError_t status) {
if (status != hipSuccess) {
std::cout << "Error could not allocate memory result " << status << std::endl;
exit(1);
}
}
int main(void) {
std::vector<float> vec_a;
std::vector<float> vec_b;
std::vector<float> vec_c;
float *a, *b, *c;
float *d_a, *d_b, *d_c;
int size = N * sizeof(int);
hipMalloc((void**)&d_a, size);
hipMalloc((void**)&d_b, size);
hipMalloc((void**)&d_c, size);
cuda_check(hipHostAlloc((void **)&a, size, hipHostMallocPortable));
cuda_check(hipHostAlloc((void **)&b, size, hipHostMallocPortable));
cuda_check(hipHostAlloc((void **)&c, size, hipHostMallocPortable));
// a = new int[N];
// b = new int[N];
// c = new int[N];
for(int i = 0; i < N; ++i) {
vec_a.push_back(i);
vec_b.push_back(i);
//a[i] = i;
//b[i] = i;
}
memcpy(a, &vec_a[0], size);
memcpy(b, &vec_b[0], size);
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
add<<<N, 1>>>(d_a, d_b, d_c);
hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
for(int i = 0; i < N; ++i) {
std::cout << c[i] << std::endl;
}
//free(a); free(b); free(c);
hipHostFree(a); hipHostFree(b); hipHostFree(c);
hipFree(d_a); hipFree(d_b); hipFree(d_c);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addPfS_S_
.globl _Z3addPfS_S_
.p2align 8
.type _Z3addPfS_S_,@function
_Z3addPfS_S_:
s_load_b128 s[4:7], s[0:1], 0x0
s_mov_b32 s2, s15
s_mov_b32 s3, 0
s_load_b64 s[0:1], s[0:1], 0x10
s_lshl_b64 s[2:3], s[2:3], 2
v_mov_b32_e32 v1, 0
s_waitcnt lgkmcnt(0)
s_add_u32 s4, s4, s2
s_addc_u32 s5, s5, s3
s_add_u32 s6, s6, s2
s_addc_u32 s7, s7, s3
s_load_b32 s4, s[4:5], 0x0
s_load_b32 s5, s[6:7], 0x0
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
s_waitcnt lgkmcnt(0)
v_add_f32_e64 v0, s4, s5
global_store_b32 v1, v0, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addPfS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 16
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addPfS_S_, .Lfunc_end0-_Z3addPfS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addPfS_S_
.private_segment_fixed_size: 0
.sgpr_count: 16
.sgpr_spill_count: 0
.symbol: _Z3addPfS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <iostream>
#include <vector>
#define N 10
__global__ void add(float *a, float *b, float *c) {
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}
void cuda_check(hipError_t status) {
if (status != hipSuccess) {
std::cout << "Error could not allocate memory result " << status << std::endl;
exit(1);
}
}
int main(void) {
std::vector<float> vec_a;
std::vector<float> vec_b;
std::vector<float> vec_c;
float *a, *b, *c;
float *d_a, *d_b, *d_c;
int size = N * sizeof(int);
hipMalloc((void**)&d_a, size);
hipMalloc((void**)&d_b, size);
hipMalloc((void**)&d_c, size);
cuda_check(hipHostAlloc((void **)&a, size, hipHostMallocPortable));
cuda_check(hipHostAlloc((void **)&b, size, hipHostMallocPortable));
cuda_check(hipHostAlloc((void **)&c, size, hipHostMallocPortable));
// a = new int[N];
// b = new int[N];
// c = new int[N];
for(int i = 0; i < N; ++i) {
vec_a.push_back(i);
vec_b.push_back(i);
//a[i] = i;
//b[i] = i;
}
memcpy(a, &vec_a[0], size);
memcpy(b, &vec_b[0], size);
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
add<<<N, 1>>>(d_a, d_b, d_c);
hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
for(int i = 0; i < N; ++i) {
std::cout << c[i] << std::endl;
}
//free(a); free(b); free(c);
hipHostFree(a); hipHostFree(b); hipHostFree(c);
hipFree(d_a); hipFree(d_b); hipFree(d_c);
return 0;
} | .text
.file "Main.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z18__device_stub__addPfS_S_ # -- Begin function _Z18__device_stub__addPfS_S_
.p2align 4, 0x90
.type _Z18__device_stub__addPfS_S_,@function
_Z18__device_stub__addPfS_S_: # @_Z18__device_stub__addPfS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addPfS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z18__device_stub__addPfS_S_, .Lfunc_end0-_Z18__device_stub__addPfS_S_
.cfi_endproc
# -- End function
.globl _Z10cuda_check10hipError_t # -- Begin function _Z10cuda_check10hipError_t
.p2align 4, 0x90
.type _Z10cuda_check10hipError_t,@function
_Z10cuda_check10hipError_t: # @_Z10cuda_check10hipError_t
.cfi_startproc
# %bb.0:
testl %edi, %edi
jne .LBB1_2
# %bb.1:
retq
.LBB1_2:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
movl %edi, %ebx
movl $_ZSt4cout, %edi
movl $.L.str, %esi
callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movq %rax, %rdi
movl %ebx, %esi
callq _ZNSolsEi
movq %rax, %rdi
callq _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
movl $1, %edi
callq exit
.Lfunc_end1:
.size _Z10cuda_check10hipError_t, .Lfunc_end1-_Z10cuda_check10hipError_t
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.Lfunc_begin0:
.cfi_startproc
.cfi_personality 3, __gxx_personality_v0
.cfi_lsda 3, .Lexception0
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $184, %rsp
.cfi_def_cfa_offset 240
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
xorl %r13d, %r13d
.Ltmp0:
.cfi_escape 0x2e, 0x00
leaq 56(%rsp), %rdi
movl $40, %esi
xorl %r14d, %r14d
callq hipMalloc
.Ltmp1:
# %bb.1:
xorl %r13d, %r13d
.Ltmp2:
.cfi_escape 0x2e, 0x00
leaq 48(%rsp), %rdi
movl $40, %esi
xorl %r14d, %r14d
callq hipMalloc
.Ltmp3:
# %bb.2:
xorl %r13d, %r13d
.Ltmp4:
.cfi_escape 0x2e, 0x00
leaq 40(%rsp), %rdi
movl $40, %esi
xorl %r14d, %r14d
callq hipMalloc
.Ltmp5:
# %bb.3:
xorl %r13d, %r13d
.Ltmp6:
.cfi_escape 0x2e, 0x00
leaq 80(%rsp), %rdi
movl $40, %esi
xorl %r14d, %r14d
movl $1, %edx
callq hipHostAlloc
.Ltmp7:
# %bb.4:
xorl %r13d, %r13d
testl %eax, %eax
jne .LBB2_5
# %bb.9: # %_Z10cuda_check10hipError_t.exit
.Ltmp14:
.cfi_escape 0x2e, 0x00
leaq 72(%rsp), %rdi
movl $40, %esi
xorl %r14d, %r14d
movl $1, %edx
callq hipHostAlloc
.Ltmp15:
# %bb.10:
xorl %r13d, %r13d
testl %eax, %eax
jne .LBB2_11
# %bb.15: # %_Z10cuda_check10hipError_t.exit34
.Ltmp22:
.cfi_escape 0x2e, 0x00
leaq 64(%rsp), %rdi
movl $40, %esi
xorl %r14d, %r14d
movl $1, %edx
callq hipHostAlloc
.Ltmp23:
# %bb.16:
testl %eax, %eax
jne .LBB2_20
# %bb.17: # %_Z10cuda_check10hipError_t.exit39.preheader.preheader
xorl %ebp, %ebp
xorl %ebx, %ebx
xorl %r15d, %r15d
xorl %eax, %eax
xorl %r13d, %r13d
xorl %r12d, %r12d
xorl %edx, %edx
jmp .LBB2_18
.p2align 4, 0x90
.LBB2_43: # in Loop: Header=BB2_18 Depth=1
movss %xmm0, (%r12)
.LBB2_60: # %_ZNSt6vectorIfSaIfEE9push_backEOf.exit52
# in Loop: Header=BB2_18 Depth=1
addq $4, %r15
addq $4, %r12
incl %ebp
cmpl $10, %ebp
je .LBB2_61
.LBB2_18: # %_Z10cuda_check10hipError_t.exit39.preheader
# =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2ss %ebp, %xmm0
cmpq %rax, %r15
je .LBB2_26
# %bb.19: # in Loop: Header=BB2_18 Depth=1
movss %xmm0, (%r15)
cmpq %rdx, %r12
jne .LBB2_43
jmp .LBB2_44
.p2align 4, 0x90
.LBB2_26: # in Loop: Header=BB2_18 Depth=1
movq %rdx, 32(%rsp) # 8-byte Spill
movq %rbx, 16(%rsp) # 8-byte Spill
subq %rbx, %r15
movabsq $9223372036854775804, %rax # imm = 0x7FFFFFFFFFFFFFFC
cmpq %rax, %r15
je .LBB2_27
# %bb.29: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit.i.i.i
# in Loop: Header=BB2_18 Depth=1
movq %r13, 24(%rsp) # 8-byte Spill
movq %r15, %rbx
sarq $2, %rbx
cmpq $1, %rbx
movq %rbx, %rax
adcq $0, %rax
leaq (%rax,%rbx), %rcx
movabsq $2305843009213693951, %r14 # imm = 0x1FFFFFFFFFFFFFFF
cmpq %r14, %rcx
jb .LBB2_31
# %bb.30: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit.i.i.i
# in Loop: Header=BB2_18 Depth=1
movq %r14, %rcx
.LBB2_31: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit.i.i.i
# in Loop: Header=BB2_18 Depth=1
addq %rbx, %rax
jb .LBB2_33
# %bb.32: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit.i.i.i
# in Loop: Header=BB2_18 Depth=1
movq %rcx, %r14
.LBB2_33: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit.i.i.i
# in Loop: Header=BB2_18 Depth=1
testq %r14, %r14
movss %xmm0, 12(%rsp) # 4-byte Spill
je .LBB2_34
# %bb.35: # in Loop: Header=BB2_18 Depth=1
leaq (,%r14,4), %rdi
.Ltmp30:
.cfi_escape 0x2e, 0x00
callq _Znwm
.Ltmp31:
# %bb.36: # in Loop: Header=BB2_18 Depth=1
movq %rax, %r13
movss 12(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
jmp .LBB2_37
.LBB2_34: # in Loop: Header=BB2_18 Depth=1
xorl %r13d, %r13d
.LBB2_37: # %_ZNSt12_Vector_baseIfSaIfEE11_M_allocateEm.exit.i.i.i
# in Loop: Header=BB2_18 Depth=1
movss %xmm0, (%r13,%rbx,4)
testq %r15, %r15
movq 16(%rsp), %rbx # 8-byte Reload
jle .LBB2_39
# %bb.38: # in Loop: Header=BB2_18 Depth=1
.cfi_escape 0x2e, 0x00
movq %r13, %rdi
movq %rbx, %rsi
movq %r15, %rdx
callq memmove@PLT
movss 12(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
.LBB2_39: # %_ZNSt6vectorIfSaIfEE11_S_relocateEPfS2_S2_RS0_.exit.i.i.i
# in Loop: Header=BB2_18 Depth=1
testq %rbx, %rbx
je .LBB2_41
# %bb.40: # in Loop: Header=BB2_18 Depth=1
.cfi_escape 0x2e, 0x00
movq %rbx, %rdi
callq _ZdlPv
movss 12(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
.LBB2_41: # %_ZNSt6vectorIfSaIfEE17_M_realloc_insertIJfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_.exit.i.i
# in Loop: Header=BB2_18 Depth=1
addq %r13, %r15
leaq (,%r14,4), %rax
addq %r13, %rax
movq %r13, %rbx
movq 24(%rsp), %r13 # 8-byte Reload
movq 32(%rsp), %rdx # 8-byte Reload
cmpq %rdx, %r12
jne .LBB2_43
.LBB2_44: # in Loop: Header=BB2_18 Depth=1
movq %rax, 32(%rsp) # 8-byte Spill
movq %r13, 24(%rsp) # 8-byte Spill
subq %r13, %r12
movabsq $9223372036854775804, %rax # imm = 0x7FFFFFFFFFFFFFFC
cmpq %rax, %r12
je .LBB2_45
# %bb.47: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit.i.i.i43
# in Loop: Header=BB2_18 Depth=1
movq %rbx, 16(%rsp) # 8-byte Spill
movq %r12, %r14
sarq $2, %r14
cmpq $1, %r14
movq %r14, %rax
adcq $0, %rax
leaq (%rax,%r14), %rcx
movabsq $2305843009213693951, %rbx # imm = 0x1FFFFFFFFFFFFFFF
cmpq %rbx, %rcx
jae .LBB2_48
# %bb.49: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit.i.i.i43
# in Loop: Header=BB2_18 Depth=1
addq %r14, %rax
jae .LBB2_50
.LBB2_51: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit.i.i.i43
# in Loop: Header=BB2_18 Depth=1
testq %rbx, %rbx
je .LBB2_52
.LBB2_53: # in Loop: Header=BB2_18 Depth=1
movss %xmm0, 12(%rsp) # 4-byte Spill
leaq (,%rbx,4), %rdi
.Ltmp33:
.cfi_escape 0x2e, 0x00
callq _Znwm
.Ltmp34:
# %bb.54: # in Loop: Header=BB2_18 Depth=1
movq %rax, %r13
movss 12(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
jmp .LBB2_55
.LBB2_48: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit.i.i.i43
# in Loop: Header=BB2_18 Depth=1
movq %rbx, %rcx
addq %r14, %rax
jb .LBB2_51
.LBB2_50: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit.i.i.i43
# in Loop: Header=BB2_18 Depth=1
movq %rcx, %rbx
testq %rbx, %rbx
jne .LBB2_53
.LBB2_52: # in Loop: Header=BB2_18 Depth=1
xorl %r13d, %r13d
.LBB2_55: # %_ZNSt12_Vector_baseIfSaIfEE11_M_allocateEm.exit.i.i.i46
# in Loop: Header=BB2_18 Depth=1
movss %xmm0, (%r13,%r14,4)
testq %r12, %r12
movq 24(%rsp), %r14 # 8-byte Reload
jle .LBB2_57
# %bb.56: # in Loop: Header=BB2_18 Depth=1
.cfi_escape 0x2e, 0x00
movq %r13, %rdi
movq %r14, %rsi
movq %r12, %rdx
callq memmove@PLT
.LBB2_57: # %_ZNSt6vectorIfSaIfEE11_S_relocateEPfS2_S2_RS0_.exit.i.i.i47
# in Loop: Header=BB2_18 Depth=1
testq %r14, %r14
je .LBB2_59
# %bb.58: # in Loop: Header=BB2_18 Depth=1
.cfi_escape 0x2e, 0x00
movq %r14, %rdi
callq _ZdlPv
.LBB2_59: # %_ZNSt6vectorIfSaIfEE17_M_realloc_insertIJfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_.exit.i.i49
# in Loop: Header=BB2_18 Depth=1
addq %r13, %r12
leaq (,%rbx,4), %rdx
addq %r13, %rdx
movq 16(%rsp), %rbx # 8-byte Reload
movq 32(%rsp), %rax # 8-byte Reload
jmp .LBB2_60
.LBB2_61:
movq 80(%rsp), %rsi
movq 32(%rbx), %rax
movq %rax, 32(%rsi)
movups (%rbx), %xmm0
movq %rbx, %r14
movups 16(%rbx), %xmm1
movups %xmm1, 16(%rsi)
movups %xmm0, (%rsi)
movq 72(%rsp), %rax
movups (%r13), %xmm0
movups 16(%r13), %xmm1
movups %xmm1, 16(%rax)
movq 32(%r13), %rcx
movq %rcx, 32(%rax)
movups %xmm0, (%rax)
movq 56(%rsp), %rdi
.Ltmp36:
.cfi_escape 0x2e, 0x00
movl $40, %edx
movl $1, %ecx
callq hipMemcpy
.Ltmp37:
# %bb.62:
movq 48(%rsp), %rdi
movq 72(%rsp), %rsi
.Ltmp38:
.cfi_escape 0x2e, 0x00
movl $40, %edx
movl $1, %ecx
callq hipMemcpy
.Ltmp39:
# %bb.63:
.Ltmp40:
.cfi_escape 0x2e, 0x00
movabsq $4294967306, %rdi # imm = 0x10000000A
movabsq $4294967297, %rdx # imm = 0x100000001
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
.Ltmp41:
# %bb.64:
testl %eax, %eax
jne .LBB2_67
# %bb.65:
movq 56(%rsp), %rax
movq 48(%rsp), %rcx
movq 40(%rsp), %rdx
movq %rax, 152(%rsp)
movq %rcx, 144(%rsp)
movq %rdx, 136(%rsp)
leaq 152(%rsp), %rax
movq %rax, 160(%rsp)
leaq 144(%rsp), %rax
movq %rax, 168(%rsp)
leaq 136(%rsp), %rax
movq %rax, 176(%rsp)
.Ltmp42:
.cfi_escape 0x2e, 0x00
leaq 120(%rsp), %rdi
leaq 104(%rsp), %rsi
leaq 96(%rsp), %rdx
leaq 88(%rsp), %rcx
callq __hipPopCallConfiguration
.Ltmp43:
# %bb.66: # %.noexc53
movq 120(%rsp), %rsi
movl 128(%rsp), %edx
movq 104(%rsp), %rcx
movl 112(%rsp), %r8d
.Ltmp44:
.cfi_escape 0x2e, 0x10
leaq 160(%rsp), %r9
movl $_Z3addPfS_S_, %edi
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
pushq 104(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.Ltmp45:
.LBB2_67:
movq 64(%rsp), %rdi
movq 40(%rsp), %rsi
.Ltmp46:
.cfi_escape 0x2e, 0x00
movl $40, %edx
movl $2, %ecx
callq hipMemcpy
.Ltmp47:
# %bb.68: # %.preheader.preheader
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB2_69: # %.preheader
# =>This Inner Loop Header: Depth=1
movq 64(%rsp), %rax
movss (%rax,%rbx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
.Ltmp48:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
.Ltmp49:
# %bb.70: # %_ZNSolsEf.exit
# in Loop: Header=BB2_69 Depth=1
movq %rax, %r15
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%r15,%rax), %r12
testq %r12, %r12
je .LBB2_71
# %bb.77: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
# in Loop: Header=BB2_69 Depth=1
cmpb $0, 56(%r12)
je .LBB2_79
# %bb.78: # in Loop: Header=BB2_69 Depth=1
movzbl 67(%r12), %eax
jmp .LBB2_81
.p2align 4, 0x90
.LBB2_79: # in Loop: Header=BB2_69 Depth=1
.Ltmp50:
.cfi_escape 0x2e, 0x00
movq %r12, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
.Ltmp51:
# %bb.80: # %.noexc74
# in Loop: Header=BB2_69 Depth=1
movq (%r12), %rax
.Ltmp52:
.cfi_escape 0x2e, 0x00
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
.Ltmp53:
.LBB2_81: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i
# in Loop: Header=BB2_69 Depth=1
.Ltmp54:
.cfi_escape 0x2e, 0x00
movsbl %al, %esi
movq %r15, %rdi
callq _ZNSo3putEc
.Ltmp55:
# %bb.82: # %.noexc76
# in Loop: Header=BB2_69 Depth=1
.Ltmp56:
.cfi_escape 0x2e, 0x00
movq %rax, %rdi
callq _ZNSo5flushEv
.Ltmp57:
# %bb.83: # %_ZNSolsEPFRSoS_E.exit
# in Loop: Header=BB2_69 Depth=1
incq %rbx
cmpq $10, %rbx
jne .LBB2_69
# %bb.84:
movq 80(%rsp), %rdi
.Ltmp59:
.cfi_escape 0x2e, 0x00
callq hipHostFree
.Ltmp60:
# %bb.85:
movq 72(%rsp), %rdi
.Ltmp61:
.cfi_escape 0x2e, 0x00
callq hipHostFree
.Ltmp62:
# %bb.86:
movq 64(%rsp), %rdi
.Ltmp63:
.cfi_escape 0x2e, 0x00
callq hipHostFree
.Ltmp64:
# %bb.87:
movq 56(%rsp), %rdi
.Ltmp65:
.cfi_escape 0x2e, 0x00
callq hipFree
.Ltmp66:
# %bb.88:
movq 48(%rsp), %rdi
.Ltmp67:
.cfi_escape 0x2e, 0x00
callq hipFree
.Ltmp68:
# %bb.89:
movq 40(%rsp), %rdi
.Ltmp69:
.cfi_escape 0x2e, 0x00
callq hipFree
.Ltmp70:
# %bb.90: # %_ZNSt6vectorIfSaIfEED2Ev.exit
testq %r13, %r13
je .LBB2_92
# %bb.91:
.cfi_escape 0x2e, 0x00
movq %r13, %rdi
callq _ZdlPv
.LBB2_92: # %_ZNSt6vectorIfSaIfEED2Ev.exit58
testq %r14, %r14
je .LBB2_94
# %bb.93:
movq %r14, %rdi
.cfi_escape 0x2e, 0x00
callq _ZdlPv
.LBB2_94: # %_ZNSt6vectorIfSaIfEED2Ev.exit60
xorl %eax, %eax
addq $184, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB2_71:
.cfi_def_cfa_offset 240
.Ltmp72:
.cfi_escape 0x2e, 0x00
callq _ZSt16__throw_bad_castv
.Ltmp73:
# %bb.76: # %.noexc73
.LBB2_27:
.Ltmp78:
.cfi_escape 0x2e, 0x00
movl $.L.str.1, %edi
callq _ZSt20__throw_length_errorPKc
.Ltmp79:
# %bb.28: # %.noexc40
.LBB2_45:
.Ltmp75:
.cfi_escape 0x2e, 0x00
movl $.L.str.1, %edi
callq _ZSt20__throw_length_errorPKc
.Ltmp76:
# %bb.46: # %.noexc50
.LBB2_5:
.Ltmp8:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $39, %edx
xorl %r14d, %r14d
movl %eax, %ebx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp9:
# %bb.6: # %.noexc
movl %ebx, %esi
xorl %r13d, %r13d
.Ltmp10:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
xorl %r14d, %r14d
callq _ZNSolsEi
.Ltmp11:
# %bb.7: # %.noexc28
xorl %r13d, %r13d
.Ltmp12:
.cfi_escape 0x2e, 0x00
xorl %r14d, %r14d
movq %rax, %rdi
callq _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
.Ltmp13:
# %bb.8: # %.noexc29
.cfi_escape 0x2e, 0x00
movl $1, %edi
callq exit
.LBB2_11:
.Ltmp16:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $39, %edx
xorl %r14d, %r14d
movl %eax, %ebx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp17:
# %bb.12: # %.noexc31
movl %ebx, %esi
xorl %r13d, %r13d
.Ltmp18:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
xorl %r14d, %r14d
callq _ZNSolsEi
.Ltmp19:
# %bb.13: # %.noexc32
xorl %r13d, %r13d
.Ltmp20:
.cfi_escape 0x2e, 0x00
xorl %r14d, %r14d
movq %rax, %rdi
callq _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
.Ltmp21:
# %bb.14: # %.noexc33
.cfi_escape 0x2e, 0x00
movl $1, %edi
callq exit
.LBB2_20:
xorl %r13d, %r13d
.Ltmp24:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $39, %edx
xorl %r14d, %r14d
movl %eax, %ebx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp25:
# %bb.21: # %.noexc36
movl %ebx, %esi
xorl %r13d, %r13d
.Ltmp26:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
xorl %r14d, %r14d
callq _ZNSolsEi
.Ltmp27:
# %bb.22: # %.noexc37
xorl %r13d, %r13d
.Ltmp28:
.cfi_escape 0x2e, 0x00
xorl %r14d, %r14d
movq %rax, %rdi
callq _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
.Ltmp29:
# %bb.23: # %.noexc38
.cfi_escape 0x2e, 0x00
movl $1, %edi
callq exit
.LBB2_72: # %.loopexit97
.Ltmp32:
jmp .LBB2_73
.LBB2_74: # %.loopexit102
.Ltmp35:
.LBB2_73: # %_ZNSt6vectorIfSaIfEED2Ev.exit62
movq %rax, %r15
movq 24(%rsp), %r13 # 8-byte Reload
jmp .LBB2_98
.LBB2_75: # %.loopexit.split-lp103
.Ltmp77:
movq %rax, %r15
movq 24(%rsp), %r13 # 8-byte Reload
jmp .LBB2_99
.LBB2_97: # %.loopexit.split-lp98
.Ltmp80:
movq %rax, %r15
.LBB2_98: # %_ZNSt6vectorIfSaIfEED2Ev.exit62
movq 16(%rsp), %rbx # 8-byte Reload
jmp .LBB2_99
.LBB2_24:
.Ltmp71:
jmp .LBB2_25
.LBB2_96: # %.loopexit.split-lp
.Ltmp74:
jmp .LBB2_25
.LBB2_95: # %.loopexit
.Ltmp58:
.LBB2_25: # %_ZNSt6vectorIfSaIfEED2Ev.exit62
movq %rax, %r15
movq %r14, %rbx
.LBB2_99: # %_ZNSt6vectorIfSaIfEED2Ev.exit62
testq %r13, %r13
jne .LBB2_100
# %bb.101: # %_ZNSt6vectorIfSaIfEED2Ev.exit64
testq %rbx, %rbx
jne .LBB2_102
.LBB2_103: # %_ZNSt6vectorIfSaIfEED2Ev.exit66
.cfi_escape 0x2e, 0x00
movq %r15, %rdi
callq _Unwind_Resume@PLT
.LBB2_100:
.cfi_escape 0x2e, 0x00
movq %r13, %rdi
callq _ZdlPv
testq %rbx, %rbx
je .LBB2_103
.LBB2_102:
.cfi_escape 0x2e, 0x00
movq %rbx, %rdi
callq _ZdlPv
.cfi_escape 0x2e, 0x00
movq %r15, %rdi
callq _Unwind_Resume@PLT
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
.section .gcc_except_table,"a",@progbits
.p2align 2, 0x0
GCC_except_table2:
.Lexception0:
.byte 255 # @LPStart Encoding = omit
.byte 255 # @TType Encoding = omit
.byte 1 # Call site Encoding = uleb128
.uleb128 .Lcst_end0-.Lcst_begin0
.Lcst_begin0:
.uleb128 .Ltmp0-.Lfunc_begin0 # >> Call Site 1 <<
.uleb128 .Ltmp23-.Ltmp0 # Call between .Ltmp0 and .Ltmp23
.uleb128 .Ltmp71-.Lfunc_begin0 # jumps to .Ltmp71
.byte 0 # On action: cleanup
.uleb128 .Ltmp30-.Lfunc_begin0 # >> Call Site 2 <<
.uleb128 .Ltmp31-.Ltmp30 # Call between .Ltmp30 and .Ltmp31
.uleb128 .Ltmp32-.Lfunc_begin0 # jumps to .Ltmp32
.byte 0 # On action: cleanup
.uleb128 .Ltmp31-.Lfunc_begin0 # >> Call Site 3 <<
.uleb128 .Ltmp33-.Ltmp31 # Call between .Ltmp31 and .Ltmp33
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp33-.Lfunc_begin0 # >> Call Site 4 <<
.uleb128 .Ltmp34-.Ltmp33 # Call between .Ltmp33 and .Ltmp34
.uleb128 .Ltmp35-.Lfunc_begin0 # jumps to .Ltmp35
.byte 0 # On action: cleanup
.uleb128 .Ltmp34-.Lfunc_begin0 # >> Call Site 5 <<
.uleb128 .Ltmp36-.Ltmp34 # Call between .Ltmp34 and .Ltmp36
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp36-.Lfunc_begin0 # >> Call Site 6 <<
.uleb128 .Ltmp47-.Ltmp36 # Call between .Ltmp36 and .Ltmp47
.uleb128 .Ltmp71-.Lfunc_begin0 # jumps to .Ltmp71
.byte 0 # On action: cleanup
.uleb128 .Ltmp48-.Lfunc_begin0 # >> Call Site 7 <<
.uleb128 .Ltmp57-.Ltmp48 # Call between .Ltmp48 and .Ltmp57
.uleb128 .Ltmp58-.Lfunc_begin0 # jumps to .Ltmp58
.byte 0 # On action: cleanup
.uleb128 .Ltmp59-.Lfunc_begin0 # >> Call Site 8 <<
.uleb128 .Ltmp70-.Ltmp59 # Call between .Ltmp59 and .Ltmp70
.uleb128 .Ltmp71-.Lfunc_begin0 # jumps to .Ltmp71
.byte 0 # On action: cleanup
.uleb128 .Ltmp72-.Lfunc_begin0 # >> Call Site 9 <<
.uleb128 .Ltmp73-.Ltmp72 # Call between .Ltmp72 and .Ltmp73
.uleb128 .Ltmp74-.Lfunc_begin0 # jumps to .Ltmp74
.byte 0 # On action: cleanup
.uleb128 .Ltmp78-.Lfunc_begin0 # >> Call Site 10 <<
.uleb128 .Ltmp79-.Ltmp78 # Call between .Ltmp78 and .Ltmp79
.uleb128 .Ltmp80-.Lfunc_begin0 # jumps to .Ltmp80
.byte 0 # On action: cleanup
.uleb128 .Ltmp75-.Lfunc_begin0 # >> Call Site 11 <<
.uleb128 .Ltmp76-.Ltmp75 # Call between .Ltmp75 and .Ltmp76
.uleb128 .Ltmp77-.Lfunc_begin0 # jumps to .Ltmp77
.byte 0 # On action: cleanup
.uleb128 .Ltmp8-.Lfunc_begin0 # >> Call Site 12 <<
.uleb128 .Ltmp29-.Ltmp8 # Call between .Ltmp8 and .Ltmp29
.uleb128 .Ltmp71-.Lfunc_begin0 # jumps to .Ltmp71
.byte 0 # On action: cleanup
.uleb128 .Ltmp29-.Lfunc_begin0 # >> Call Site 13 <<
.uleb128 .Lfunc_end2-.Ltmp29 # Call between .Ltmp29 and .Lfunc_end2
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.Lcst_end0:
.p2align 2, 0x0
# -- End function
.text
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3addPfS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3addPfS_S_,@object # @_Z3addPfS_S_
.section .rodata,"a",@progbits
.globl _Z3addPfS_S_
.p2align 3, 0x0
_Z3addPfS_S_:
.quad _Z18__device_stub__addPfS_S_
.size _Z3addPfS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Error could not allocate memory result "
.size .L.str, 40
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "vector::_M_realloc_insert"
.size .L.str.1, 26
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3addPfS_S_"
.size .L__unnamed_1, 13
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__addPfS_S_
.addrsig_sym __gxx_personality_v0
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Unwind_Resume
.addrsig_sym _Z3addPfS_S_
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z3addPfS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE.U32 R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0007 */
/*0050*/ IMAD.WIDE.U32 R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0007 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE.U32 R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0007 */
/*0090*/ FADD R9, R2, R5 ; /* 0x0000000502097221 */
/* 0x004fca0000000000 */
/*00a0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addPfS_S_
.globl _Z3addPfS_S_
.p2align 8
.type _Z3addPfS_S_,@function
_Z3addPfS_S_:
s_load_b128 s[4:7], s[0:1], 0x0
s_mov_b32 s2, s15
s_mov_b32 s3, 0
s_load_b64 s[0:1], s[0:1], 0x10
s_lshl_b64 s[2:3], s[2:3], 2
v_mov_b32_e32 v1, 0
s_waitcnt lgkmcnt(0)
s_add_u32 s4, s4, s2
s_addc_u32 s5, s5, s3
s_add_u32 s6, s6, s2
s_addc_u32 s7, s7, s3
s_load_b32 s4, s[4:5], 0x0
s_load_b32 s5, s[6:7], 0x0
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
s_waitcnt lgkmcnt(0)
v_add_f32_e64 v0, s4, s5
global_store_b32 v1, v0, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addPfS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 16
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addPfS_S_, .Lfunc_end0-_Z3addPfS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addPfS_S_
.private_segment_fixed_size: 0
.sgpr_count: 16
.sgpr_spill_count: 0
.symbol: _Z3addPfS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00149f16_00000000-6_Main.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB4046:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4046:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "Error could not allocate memory result "
.text
.globl _Z10cuda_check9cudaError
.type _Z10cuda_check9cudaError, @function
_Z10cuda_check9cudaError:
.LFB4032:
.cfi_startproc
endbr64
testl %edi, %edi
jne .L8
ret
.L8:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movl %edi, %ebx
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %ebx, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $1, %edi
call exit@PLT
.cfi_endproc
.LFE4032:
.size _Z10cuda_check9cudaError, .-_Z10cuda_check9cudaError
.globl _Z26__device_stub__Z3addPfS_S_PfS_S_
.type _Z26__device_stub__Z3addPfS_S_PfS_S_, @function
_Z26__device_stub__Z3addPfS_S_PfS_S_:
.LFB4068:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L13
.L9:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L14
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L13:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addPfS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L9
.L14:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4068:
.size _Z26__device_stub__Z3addPfS_S_PfS_S_, .-_Z26__device_stub__Z3addPfS_S_PfS_S_
.globl _Z3addPfS_S_
.type _Z3addPfS_S_, @function
_Z3addPfS_S_:
.LFB4069:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z3addPfS_S_PfS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4069:
.size _Z3addPfS_S_, .-_Z3addPfS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "_Z3addPfS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB4071:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addPfS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4071:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .text._ZNSt6vectorIfSaIfEED2Ev,"axG",@progbits,_ZNSt6vectorIfSaIfEED5Ev,comdat
.align 2
.weak _ZNSt6vectorIfSaIfEED2Ev
.type _ZNSt6vectorIfSaIfEED2Ev, @function
_ZNSt6vectorIfSaIfEED2Ev:
.LFB4384:
.cfi_startproc
endbr64
movq (%rdi), %rax
testq %rax, %rax
je .L22
subq $8, %rsp
.cfi_def_cfa_offset 16
movq 16(%rdi), %rsi
subq %rax, %rsi
movq %rax, %rdi
call _ZdlPvm@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.L22:
ret
.cfi_endproc
.LFE4384:
.size _ZNSt6vectorIfSaIfEED2Ev, .-_ZNSt6vectorIfSaIfEED2Ev
.weak _ZNSt6vectorIfSaIfEED1Ev
.set _ZNSt6vectorIfSaIfEED1Ev,_ZNSt6vectorIfSaIfEED2Ev
.section .rodata._ZNSt6vectorIfSaIfEE17_M_realloc_insertIJfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_.str1.1,"aMS",@progbits,1
.LC2:
.string "vector::_M_realloc_insert"
.section .text._ZNSt6vectorIfSaIfEE17_M_realloc_insertIJfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_,"axG",@progbits,_ZNSt6vectorIfSaIfEE17_M_realloc_insertIJfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_,comdat
.align 2
.weak _ZNSt6vectorIfSaIfEE17_M_realloc_insertIJfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_
.type _ZNSt6vectorIfSaIfEE17_M_realloc_insertIJfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_, @function
_ZNSt6vectorIfSaIfEE17_M_realloc_insertIJfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_:
.LFB4645:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movq %rsi, (%rsp)
movq %rdx, 8(%rsp)
movq 8(%rdi), %rbp
movq (%rdi), %r13
movq %rbp, %rax
subq %r13, %rax
sarq $2, %rax
movabsq $2305843009213693951, %rdx
cmpq %rdx, %rax
je .L42
movq %rdi, %rbx
cmpq %r13, %rbp
movl $1, %edx
cmovne %rax, %rdx
addq %rdx, %rax
jc .L28
movabsq $2305843009213693951, %r14
cmpq %r14, %rax
cmovbe %rax, %r14
movq (%rsp), %r15
subq %r13, %r15
movl $0, %r12d
testq %rax, %rax
je .L29
jmp .L36
.L42:
leaq .LC2(%rip), %rdi
call _ZSt20__throw_length_errorPKc@PLT
.L43:
movq %r15, %rdx
movq %r13, %rsi
movq %r12, %rdi
call memmove@PLT
leaq 4(%r12,%r15), %r15
movq (%rsp), %rax
subq %rax, %rbp
testq %rbp, %rbp
jg .L31
addq %rbp, %r15
movq 16(%rbx), %rsi
subq %r13, %rsi
jmp .L35
.L28:
movq (%rsp), %r15
subq %r13, %r15
movabsq $2305843009213693951, %r14
.L36:
leaq 0(,%r14,4), %rdi
call _Znwm@PLT
movq %rax, %r12
.L29:
movq 8(%rsp), %rax
movss (%rax), %xmm0
movss %xmm0, (%r12,%r15)
testq %r15, %r15
jg .L43
leaq 4(%r12,%r15), %r15
movq (%rsp), %rax
subq %rax, %rbp
testq %rbp, %rbp
jle .L33
.L31:
movq %rbp, %rdx
movq (%rsp), %rsi
movq %r15, %rdi
call memcpy@PLT
.L33:
addq %rbp, %r15
testq %r13, %r13
je .L34
movq 16(%rbx), %rsi
subq %r13, %rsi
.L35:
movq %r13, %rdi
call _ZdlPvm@PLT
.L34:
movq %r12, (%rbx)
movq %r15, 8(%rbx)
leaq (%r12,%r14,4), %rax
movq %rax, 16(%rbx)
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4645:
.size _ZNSt6vectorIfSaIfEE17_M_realloc_insertIJfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_, .-_ZNSt6vectorIfSaIfEE17_M_realloc_insertIJfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_
.section .text._ZNSt6vectorIfSaIfEE12emplace_backIJfEEERfDpOT_,"axG",@progbits,_ZNSt6vectorIfSaIfEE12emplace_backIJfEEERfDpOT_,comdat
.align 2
.weak _ZNSt6vectorIfSaIfEE12emplace_backIJfEEERfDpOT_
.type _ZNSt6vectorIfSaIfEE12emplace_backIJfEEERfDpOT_, @function
_ZNSt6vectorIfSaIfEE12emplace_backIJfEEERfDpOT_:
.LFB4545:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movq %rdi, %rbx
movq 8(%rdi), %rax
cmpq 16(%rdi), %rax
je .L45
movss (%rsi), %xmm0
movss %xmm0, (%rax)
addq $4, 8(%rdi)
.L46:
movq 8(%rbx), %rax
subq $4, %rax
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L45:
.cfi_restore_state
movq %rsi, %rdx
movq %rax, %rsi
call _ZNSt6vectorIfSaIfEE17_M_realloc_insertIJfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_
jmp .L46
.cfi_endproc
.LFE4545:
.size _ZNSt6vectorIfSaIfEE12emplace_backIJfEEERfDpOT_, .-_ZNSt6vectorIfSaIfEE12emplace_backIJfEEERfDpOT_
.text
.globl main
.type main, @function
main:
.LFB4033:
.cfi_startproc
.cfi_personality 0x9b,DW.ref.__gxx_personality_v0
.cfi_lsda 0x1b,.LLSDA4033
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $184, %rsp
.cfi_def_cfa_offset 224
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
movq $0, 80(%rsp)
movq $0, 88(%rsp)
movq $0, 96(%rsp)
movq $0, 112(%rsp)
movq $0, 120(%rsp)
movq $0, 128(%rsp)
movq $0, 144(%rsp)
movq $0, 152(%rsp)
movq $0, 160(%rsp)
leaq 32(%rsp), %rdi
movl $40, %esi
.LEHB0:
call cudaMalloc@PLT
leaq 40(%rsp), %rdi
movl $40, %esi
call cudaMalloc@PLT
leaq 48(%rsp), %rdi
movl $40, %esi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movl $1, %edx
movl $40, %esi
call cudaHostAlloc@PLT
movl %eax, %edi
call _Z10cuda_check9cudaError
leaq 16(%rsp), %rdi
movl $1, %edx
movl $40, %esi
call cudaHostAlloc@PLT
movl %eax, %edi
call _Z10cuda_check9cudaError
leaq 24(%rsp), %rdi
movl $1, %edx
movl $40, %esi
call cudaHostAlloc@PLT
movl %eax, %edi
call _Z10cuda_check9cudaError
movl $0, %ebx
leaq 68(%rsp), %r12
jmp .L49
.L65:
movl %ebp, 68(%rsp)
leaq 112(%rsp), %rdi
movq %r12, %rsi
call _ZNSt6vectorIfSaIfEE12emplace_backIJfEEERfDpOT_
addl $1, %ebx
cmpl $10, %ebx
je .L64
.L49:
pxor %xmm1, %xmm1
cvtsi2ssl %ebx, %xmm1
movd %xmm1, %ebp
movss %xmm1, 68(%rsp)
leaq 80(%rsp), %rdi
movq %r12, %rsi
call _ZNSt6vectorIfSaIfEE12emplace_backIJfEEERfDpOT_
jmp .L65
.L64:
movq 8(%rsp), %rsi
movq 80(%rsp), %rax
movdqu (%rax), %xmm2
movups %xmm2, (%rsi)
movdqu 16(%rax), %xmm3
movups %xmm3, 16(%rsi)
movq 32(%rax), %rax
movq %rax, 32(%rsi)
movq 16(%rsp), %rax
movq 112(%rsp), %rdx
movdqu (%rdx), %xmm4
movups %xmm4, (%rax)
movdqu 16(%rdx), %xmm5
movups %xmm5, 16(%rax)
movq 32(%rdx), %rdx
movq %rdx, 32(%rax)
movl $1, %ecx
movl $40, %edx
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $40, %edx
movq 16(%rsp), %rsi
movq 40(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $10, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 68(%rsp), %rdx
movl $1, %ecx
movq 56(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L50
movq 48(%rsp), %rdx
movq 40(%rsp), %rsi
movq 32(%rsp), %rdi
call _Z26__device_stub__Z3addPfS_S_PfS_S_
.L50:
movl $2, %ecx
movl $40, %edx
movq 48(%rsp), %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $0, %ebp
leaq _ZSt4cout(%rip), %r12
jmp .L55
.L70:
movq %rax, %rbx
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %r13
testq %r13, %r13
je .L66
cmpb $0, 56(%r13)
je .L53
movzbl 67(%r13), %esi
.L54:
movsbl %sil, %esi
movq %rbx, %rdi
call _ZNSo3putEc@PLT
jmp .L67
.L66:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L68
call _ZSt16__throw_bad_castv@PLT
.L59:
endbr64
movq %rax, %rbx
leaq 144(%rsp), %rdi
call _ZNSt6vectorIfSaIfEED1Ev
leaq 112(%rsp), %rdi
call _ZNSt6vectorIfSaIfEED1Ev
leaq 80(%rsp), %rdi
call _ZNSt6vectorIfSaIfEED1Ev
movq 168(%rsp), %rax
subq %fs:40, %rax
je .L57
call __stack_chk_fail@PLT
.L68:
call __stack_chk_fail@PLT
.L53:
movq %r13, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq 0(%r13), %rax
movl $10, %esi
movq %r13, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L54
.L67:
movq %rax, %rdi
call _ZNSo5flushEv@PLT
addq $4, %rbp
cmpq $40, %rbp
je .L69
.L55:
movq 24(%rsp), %rax
pxor %xmm0, %xmm0
cvtss2sd (%rax,%rbp), %xmm0
movq %r12, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
jmp .L70
.L69:
movq 8(%rsp), %rdi
call cudaFreeHost@PLT
movq 16(%rsp), %rdi
call cudaFreeHost@PLT
movq 24(%rsp), %rdi
call cudaFreeHost@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
.LEHE0:
leaq 144(%rsp), %rdi
call _ZNSt6vectorIfSaIfEED1Ev
leaq 112(%rsp), %rdi
call _ZNSt6vectorIfSaIfEED1Ev
leaq 80(%rsp), %rdi
call _ZNSt6vectorIfSaIfEED1Ev
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L71
movl $0, %eax
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L57:
.cfi_restore_state
movq %rbx, %rdi
.LEHB1:
call _Unwind_Resume@PLT
.LEHE1:
.L71:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4033:
.globl __gxx_personality_v0
.section .gcc_except_table,"a",@progbits
.LLSDA4033:
.byte 0xff
.byte 0xff
.byte 0x1
.uleb128 .LLSDACSE4033-.LLSDACSB4033
.LLSDACSB4033:
.uleb128 .LEHB0-.LFB4033
.uleb128 .LEHE0-.LEHB0
.uleb128 .L59-.LFB4033
.uleb128 0
.uleb128 .LEHB1-.LFB4033
.uleb128 .LEHE1-.LEHB1
.uleb128 0
.uleb128 0
.LLSDACSE4033:
.text
.size main, .-main
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.hidden DW.ref.__gxx_personality_v0
.weak DW.ref.__gxx_personality_v0
.section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat
.align 8
.type DW.ref.__gxx_personality_v0, @object
.size DW.ref.__gxx_personality_v0, 8
DW.ref.__gxx_personality_v0:
.quad __gxx_personality_v0
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "Main.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z18__device_stub__addPfS_S_ # -- Begin function _Z18__device_stub__addPfS_S_
.p2align 4, 0x90
.type _Z18__device_stub__addPfS_S_,@function
_Z18__device_stub__addPfS_S_: # @_Z18__device_stub__addPfS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addPfS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z18__device_stub__addPfS_S_, .Lfunc_end0-_Z18__device_stub__addPfS_S_
.cfi_endproc
# -- End function
.globl _Z10cuda_check10hipError_t # -- Begin function _Z10cuda_check10hipError_t
.p2align 4, 0x90
.type _Z10cuda_check10hipError_t,@function
_Z10cuda_check10hipError_t: # @_Z10cuda_check10hipError_t
.cfi_startproc
# %bb.0:
testl %edi, %edi
jne .LBB1_2
# %bb.1:
retq
.LBB1_2:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
movl %edi, %ebx
movl $_ZSt4cout, %edi
movl $.L.str, %esi
callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movq %rax, %rdi
movl %ebx, %esi
callq _ZNSolsEi
movq %rax, %rdi
callq _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
movl $1, %edi
callq exit
.Lfunc_end1:
.size _Z10cuda_check10hipError_t, .Lfunc_end1-_Z10cuda_check10hipError_t
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.Lfunc_begin0:
.cfi_startproc
.cfi_personality 3, __gxx_personality_v0
.cfi_lsda 3, .Lexception0
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $184, %rsp
.cfi_def_cfa_offset 240
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
xorl %r13d, %r13d
.Ltmp0:
.cfi_escape 0x2e, 0x00
leaq 56(%rsp), %rdi
movl $40, %esi
xorl %r14d, %r14d
callq hipMalloc
.Ltmp1:
# %bb.1:
xorl %r13d, %r13d
.Ltmp2:
.cfi_escape 0x2e, 0x00
leaq 48(%rsp), %rdi
movl $40, %esi
xorl %r14d, %r14d
callq hipMalloc
.Ltmp3:
# %bb.2:
xorl %r13d, %r13d
.Ltmp4:
.cfi_escape 0x2e, 0x00
leaq 40(%rsp), %rdi
movl $40, %esi
xorl %r14d, %r14d
callq hipMalloc
.Ltmp5:
# %bb.3:
xorl %r13d, %r13d
.Ltmp6:
.cfi_escape 0x2e, 0x00
leaq 80(%rsp), %rdi
movl $40, %esi
xorl %r14d, %r14d
movl $1, %edx
callq hipHostAlloc
.Ltmp7:
# %bb.4:
xorl %r13d, %r13d
testl %eax, %eax
jne .LBB2_5
# %bb.9: # %_Z10cuda_check10hipError_t.exit
.Ltmp14:
.cfi_escape 0x2e, 0x00
leaq 72(%rsp), %rdi
movl $40, %esi
xorl %r14d, %r14d
movl $1, %edx
callq hipHostAlloc
.Ltmp15:
# %bb.10:
xorl %r13d, %r13d
testl %eax, %eax
jne .LBB2_11
# %bb.15: # %_Z10cuda_check10hipError_t.exit34
.Ltmp22:
.cfi_escape 0x2e, 0x00
leaq 64(%rsp), %rdi
movl $40, %esi
xorl %r14d, %r14d
movl $1, %edx
callq hipHostAlloc
.Ltmp23:
# %bb.16:
testl %eax, %eax
jne .LBB2_20
# %bb.17: # %_Z10cuda_check10hipError_t.exit39.preheader.preheader
xorl %ebp, %ebp
xorl %ebx, %ebx
xorl %r15d, %r15d
xorl %eax, %eax
xorl %r13d, %r13d
xorl %r12d, %r12d
xorl %edx, %edx
jmp .LBB2_18
.p2align 4, 0x90
.LBB2_43: # in Loop: Header=BB2_18 Depth=1
movss %xmm0, (%r12)
.LBB2_60: # %_ZNSt6vectorIfSaIfEE9push_backEOf.exit52
# in Loop: Header=BB2_18 Depth=1
addq $4, %r15
addq $4, %r12
incl %ebp
cmpl $10, %ebp
je .LBB2_61
.LBB2_18: # %_Z10cuda_check10hipError_t.exit39.preheader
# =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2ss %ebp, %xmm0
cmpq %rax, %r15
je .LBB2_26
# %bb.19: # in Loop: Header=BB2_18 Depth=1
movss %xmm0, (%r15)
cmpq %rdx, %r12
jne .LBB2_43
jmp .LBB2_44
.p2align 4, 0x90
.LBB2_26: # in Loop: Header=BB2_18 Depth=1
movq %rdx, 32(%rsp) # 8-byte Spill
movq %rbx, 16(%rsp) # 8-byte Spill
subq %rbx, %r15
movabsq $9223372036854775804, %rax # imm = 0x7FFFFFFFFFFFFFFC
cmpq %rax, %r15
je .LBB2_27
# %bb.29: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit.i.i.i
# in Loop: Header=BB2_18 Depth=1
movq %r13, 24(%rsp) # 8-byte Spill
movq %r15, %rbx
sarq $2, %rbx
cmpq $1, %rbx
movq %rbx, %rax
adcq $0, %rax
leaq (%rax,%rbx), %rcx
movabsq $2305843009213693951, %r14 # imm = 0x1FFFFFFFFFFFFFFF
cmpq %r14, %rcx
jb .LBB2_31
# %bb.30: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit.i.i.i
# in Loop: Header=BB2_18 Depth=1
movq %r14, %rcx
.LBB2_31: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit.i.i.i
# in Loop: Header=BB2_18 Depth=1
addq %rbx, %rax
jb .LBB2_33
# %bb.32: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit.i.i.i
# in Loop: Header=BB2_18 Depth=1
movq %rcx, %r14
.LBB2_33: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit.i.i.i
# in Loop: Header=BB2_18 Depth=1
testq %r14, %r14
movss %xmm0, 12(%rsp) # 4-byte Spill
je .LBB2_34
# %bb.35: # in Loop: Header=BB2_18 Depth=1
leaq (,%r14,4), %rdi
.Ltmp30:
.cfi_escape 0x2e, 0x00
callq _Znwm
.Ltmp31:
# %bb.36: # in Loop: Header=BB2_18 Depth=1
movq %rax, %r13
movss 12(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
jmp .LBB2_37
.LBB2_34: # in Loop: Header=BB2_18 Depth=1
xorl %r13d, %r13d
.LBB2_37: # %_ZNSt12_Vector_baseIfSaIfEE11_M_allocateEm.exit.i.i.i
# in Loop: Header=BB2_18 Depth=1
movss %xmm0, (%r13,%rbx,4)
testq %r15, %r15
movq 16(%rsp), %rbx # 8-byte Reload
jle .LBB2_39
# %bb.38: # in Loop: Header=BB2_18 Depth=1
.cfi_escape 0x2e, 0x00
movq %r13, %rdi
movq %rbx, %rsi
movq %r15, %rdx
callq memmove@PLT
movss 12(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
.LBB2_39: # %_ZNSt6vectorIfSaIfEE11_S_relocateEPfS2_S2_RS0_.exit.i.i.i
# in Loop: Header=BB2_18 Depth=1
testq %rbx, %rbx
je .LBB2_41
# %bb.40: # in Loop: Header=BB2_18 Depth=1
.cfi_escape 0x2e, 0x00
movq %rbx, %rdi
callq _ZdlPv
movss 12(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
.LBB2_41: # %_ZNSt6vectorIfSaIfEE17_M_realloc_insertIJfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_.exit.i.i
# in Loop: Header=BB2_18 Depth=1
addq %r13, %r15
leaq (,%r14,4), %rax
addq %r13, %rax
movq %r13, %rbx
movq 24(%rsp), %r13 # 8-byte Reload
movq 32(%rsp), %rdx # 8-byte Reload
cmpq %rdx, %r12
jne .LBB2_43
.LBB2_44: # in Loop: Header=BB2_18 Depth=1
movq %rax, 32(%rsp) # 8-byte Spill
movq %r13, 24(%rsp) # 8-byte Spill
subq %r13, %r12
movabsq $9223372036854775804, %rax # imm = 0x7FFFFFFFFFFFFFFC
cmpq %rax, %r12
je .LBB2_45
# %bb.47: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit.i.i.i43
# in Loop: Header=BB2_18 Depth=1
movq %rbx, 16(%rsp) # 8-byte Spill
movq %r12, %r14
sarq $2, %r14
cmpq $1, %r14
movq %r14, %rax
adcq $0, %rax
leaq (%rax,%r14), %rcx
movabsq $2305843009213693951, %rbx # imm = 0x1FFFFFFFFFFFFFFF
cmpq %rbx, %rcx
jae .LBB2_48
# %bb.49: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit.i.i.i43
# in Loop: Header=BB2_18 Depth=1
addq %r14, %rax
jae .LBB2_50
.LBB2_51: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit.i.i.i43
# in Loop: Header=BB2_18 Depth=1
testq %rbx, %rbx
je .LBB2_52
.LBB2_53: # in Loop: Header=BB2_18 Depth=1
movss %xmm0, 12(%rsp) # 4-byte Spill
leaq (,%rbx,4), %rdi
.Ltmp33:
.cfi_escape 0x2e, 0x00
callq _Znwm
.Ltmp34:
# %bb.54: # in Loop: Header=BB2_18 Depth=1
movq %rax, %r13
movss 12(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
jmp .LBB2_55
.LBB2_48: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit.i.i.i43
# in Loop: Header=BB2_18 Depth=1
movq %rbx, %rcx
addq %r14, %rax
jb .LBB2_51
.LBB2_50: # %_ZNKSt6vectorIfSaIfEE12_M_check_lenEmPKc.exit.i.i.i43
# in Loop: Header=BB2_18 Depth=1
movq %rcx, %rbx
testq %rbx, %rbx
jne .LBB2_53
.LBB2_52: # in Loop: Header=BB2_18 Depth=1
xorl %r13d, %r13d
.LBB2_55: # %_ZNSt12_Vector_baseIfSaIfEE11_M_allocateEm.exit.i.i.i46
# in Loop: Header=BB2_18 Depth=1
movss %xmm0, (%r13,%r14,4)
testq %r12, %r12
movq 24(%rsp), %r14 # 8-byte Reload
jle .LBB2_57
# %bb.56: # in Loop: Header=BB2_18 Depth=1
.cfi_escape 0x2e, 0x00
movq %r13, %rdi
movq %r14, %rsi
movq %r12, %rdx
callq memmove@PLT
.LBB2_57: # %_ZNSt6vectorIfSaIfEE11_S_relocateEPfS2_S2_RS0_.exit.i.i.i47
# in Loop: Header=BB2_18 Depth=1
testq %r14, %r14
je .LBB2_59
# %bb.58: # in Loop: Header=BB2_18 Depth=1
.cfi_escape 0x2e, 0x00
movq %r14, %rdi
callq _ZdlPv
.LBB2_59: # %_ZNSt6vectorIfSaIfEE17_M_realloc_insertIJfEEEvN9__gnu_cxx17__normal_iteratorIPfS1_EEDpOT_.exit.i.i49
# in Loop: Header=BB2_18 Depth=1
addq %r13, %r12
leaq (,%rbx,4), %rdx
addq %r13, %rdx
movq 16(%rsp), %rbx # 8-byte Reload
movq 32(%rsp), %rax # 8-byte Reload
jmp .LBB2_60
.LBB2_61:
movq 80(%rsp), %rsi
movq 32(%rbx), %rax
movq %rax, 32(%rsi)
movups (%rbx), %xmm0
movq %rbx, %r14
movups 16(%rbx), %xmm1
movups %xmm1, 16(%rsi)
movups %xmm0, (%rsi)
movq 72(%rsp), %rax
movups (%r13), %xmm0
movups 16(%r13), %xmm1
movups %xmm1, 16(%rax)
movq 32(%r13), %rcx
movq %rcx, 32(%rax)
movups %xmm0, (%rax)
movq 56(%rsp), %rdi
.Ltmp36:
.cfi_escape 0x2e, 0x00
movl $40, %edx
movl $1, %ecx
callq hipMemcpy
.Ltmp37:
# %bb.62:
movq 48(%rsp), %rdi
movq 72(%rsp), %rsi
.Ltmp38:
.cfi_escape 0x2e, 0x00
movl $40, %edx
movl $1, %ecx
callq hipMemcpy
.Ltmp39:
# %bb.63:
.Ltmp40:
.cfi_escape 0x2e, 0x00
movabsq $4294967306, %rdi # imm = 0x10000000A
movabsq $4294967297, %rdx # imm = 0x100000001
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
.Ltmp41:
# %bb.64:
testl %eax, %eax
jne .LBB2_67
# %bb.65:
movq 56(%rsp), %rax
movq 48(%rsp), %rcx
movq 40(%rsp), %rdx
movq %rax, 152(%rsp)
movq %rcx, 144(%rsp)
movq %rdx, 136(%rsp)
leaq 152(%rsp), %rax
movq %rax, 160(%rsp)
leaq 144(%rsp), %rax
movq %rax, 168(%rsp)
leaq 136(%rsp), %rax
movq %rax, 176(%rsp)
.Ltmp42:
.cfi_escape 0x2e, 0x00
leaq 120(%rsp), %rdi
leaq 104(%rsp), %rsi
leaq 96(%rsp), %rdx
leaq 88(%rsp), %rcx
callq __hipPopCallConfiguration
.Ltmp43:
# %bb.66: # %.noexc53
movq 120(%rsp), %rsi
movl 128(%rsp), %edx
movq 104(%rsp), %rcx
movl 112(%rsp), %r8d
.Ltmp44:
.cfi_escape 0x2e, 0x10
leaq 160(%rsp), %r9
movl $_Z3addPfS_S_, %edi
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
pushq 104(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.Ltmp45:
.LBB2_67:
movq 64(%rsp), %rdi
movq 40(%rsp), %rsi
.Ltmp46:
.cfi_escape 0x2e, 0x00
movl $40, %edx
movl $2, %ecx
callq hipMemcpy
.Ltmp47:
# %bb.68: # %.preheader.preheader
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB2_69: # %.preheader
# =>This Inner Loop Header: Depth=1
movq 64(%rsp), %rax
movss (%rax,%rbx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
.Ltmp48:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
.Ltmp49:
# %bb.70: # %_ZNSolsEf.exit
# in Loop: Header=BB2_69 Depth=1
movq %rax, %r15
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%r15,%rax), %r12
testq %r12, %r12
je .LBB2_71
# %bb.77: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
# in Loop: Header=BB2_69 Depth=1
cmpb $0, 56(%r12)
je .LBB2_79
# %bb.78: # in Loop: Header=BB2_69 Depth=1
movzbl 67(%r12), %eax
jmp .LBB2_81
.p2align 4, 0x90
.LBB2_79: # in Loop: Header=BB2_69 Depth=1
.Ltmp50:
.cfi_escape 0x2e, 0x00
movq %r12, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
.Ltmp51:
# %bb.80: # %.noexc74
# in Loop: Header=BB2_69 Depth=1
movq (%r12), %rax
.Ltmp52:
.cfi_escape 0x2e, 0x00
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
.Ltmp53:
.LBB2_81: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i
# in Loop: Header=BB2_69 Depth=1
.Ltmp54:
.cfi_escape 0x2e, 0x00
movsbl %al, %esi
movq %r15, %rdi
callq _ZNSo3putEc
.Ltmp55:
# %bb.82: # %.noexc76
# in Loop: Header=BB2_69 Depth=1
.Ltmp56:
.cfi_escape 0x2e, 0x00
movq %rax, %rdi
callq _ZNSo5flushEv
.Ltmp57:
# %bb.83: # %_ZNSolsEPFRSoS_E.exit
# in Loop: Header=BB2_69 Depth=1
incq %rbx
cmpq $10, %rbx
jne .LBB2_69
# %bb.84:
movq 80(%rsp), %rdi
.Ltmp59:
.cfi_escape 0x2e, 0x00
callq hipHostFree
.Ltmp60:
# %bb.85:
movq 72(%rsp), %rdi
.Ltmp61:
.cfi_escape 0x2e, 0x00
callq hipHostFree
.Ltmp62:
# %bb.86:
movq 64(%rsp), %rdi
.Ltmp63:
.cfi_escape 0x2e, 0x00
callq hipHostFree
.Ltmp64:
# %bb.87:
movq 56(%rsp), %rdi
.Ltmp65:
.cfi_escape 0x2e, 0x00
callq hipFree
.Ltmp66:
# %bb.88:
movq 48(%rsp), %rdi
.Ltmp67:
.cfi_escape 0x2e, 0x00
callq hipFree
.Ltmp68:
# %bb.89:
movq 40(%rsp), %rdi
.Ltmp69:
.cfi_escape 0x2e, 0x00
callq hipFree
.Ltmp70:
# %bb.90: # %_ZNSt6vectorIfSaIfEED2Ev.exit
testq %r13, %r13
je .LBB2_92
# %bb.91:
.cfi_escape 0x2e, 0x00
movq %r13, %rdi
callq _ZdlPv
.LBB2_92: # %_ZNSt6vectorIfSaIfEED2Ev.exit58
testq %r14, %r14
je .LBB2_94
# %bb.93:
movq %r14, %rdi
.cfi_escape 0x2e, 0x00
callq _ZdlPv
.LBB2_94: # %_ZNSt6vectorIfSaIfEED2Ev.exit60
xorl %eax, %eax
addq $184, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB2_71:
.cfi_def_cfa_offset 240
.Ltmp72:
.cfi_escape 0x2e, 0x00
callq _ZSt16__throw_bad_castv
.Ltmp73:
# %bb.76: # %.noexc73
.LBB2_27:
.Ltmp78:
.cfi_escape 0x2e, 0x00
movl $.L.str.1, %edi
callq _ZSt20__throw_length_errorPKc
.Ltmp79:
# %bb.28: # %.noexc40
.LBB2_45:
.Ltmp75:
.cfi_escape 0x2e, 0x00
movl $.L.str.1, %edi
callq _ZSt20__throw_length_errorPKc
.Ltmp76:
# %bb.46: # %.noexc50
.LBB2_5:
.Ltmp8:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $39, %edx
xorl %r14d, %r14d
movl %eax, %ebx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp9:
# %bb.6: # %.noexc
movl %ebx, %esi
xorl %r13d, %r13d
.Ltmp10:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
xorl %r14d, %r14d
callq _ZNSolsEi
.Ltmp11:
# %bb.7: # %.noexc28
xorl %r13d, %r13d
.Ltmp12:
.cfi_escape 0x2e, 0x00
xorl %r14d, %r14d
movq %rax, %rdi
callq _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
.Ltmp13:
# %bb.8: # %.noexc29
.cfi_escape 0x2e, 0x00
movl $1, %edi
callq exit
.LBB2_11:
.Ltmp16:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $39, %edx
xorl %r14d, %r14d
movl %eax, %ebx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp17:
# %bb.12: # %.noexc31
movl %ebx, %esi
xorl %r13d, %r13d
.Ltmp18:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
xorl %r14d, %r14d
callq _ZNSolsEi
.Ltmp19:
# %bb.13: # %.noexc32
xorl %r13d, %r13d
.Ltmp20:
.cfi_escape 0x2e, 0x00
xorl %r14d, %r14d
movq %rax, %rdi
callq _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
.Ltmp21:
# %bb.14: # %.noexc33
.cfi_escape 0x2e, 0x00
movl $1, %edi
callq exit
.LBB2_20:
xorl %r13d, %r13d
.Ltmp24:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $39, %edx
xorl %r14d, %r14d
movl %eax, %ebx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp25:
# %bb.21: # %.noexc36
movl %ebx, %esi
xorl %r13d, %r13d
.Ltmp26:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
xorl %r14d, %r14d
callq _ZNSolsEi
.Ltmp27:
# %bb.22: # %.noexc37
xorl %r13d, %r13d
.Ltmp28:
.cfi_escape 0x2e, 0x00
xorl %r14d, %r14d
movq %rax, %rdi
callq _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
.Ltmp29:
# %bb.23: # %.noexc38
.cfi_escape 0x2e, 0x00
movl $1, %edi
callq exit
.LBB2_72: # %.loopexit97
.Ltmp32:
jmp .LBB2_73
.LBB2_74: # %.loopexit102
.Ltmp35:
.LBB2_73: # %_ZNSt6vectorIfSaIfEED2Ev.exit62
movq %rax, %r15
movq 24(%rsp), %r13 # 8-byte Reload
jmp .LBB2_98
.LBB2_75: # %.loopexit.split-lp103
.Ltmp77:
movq %rax, %r15
movq 24(%rsp), %r13 # 8-byte Reload
jmp .LBB2_99
.LBB2_97: # %.loopexit.split-lp98
.Ltmp80:
movq %rax, %r15
.LBB2_98: # %_ZNSt6vectorIfSaIfEED2Ev.exit62
movq 16(%rsp), %rbx # 8-byte Reload
jmp .LBB2_99
.LBB2_24:
.Ltmp71:
jmp .LBB2_25
.LBB2_96: # %.loopexit.split-lp
.Ltmp74:
jmp .LBB2_25
.LBB2_95: # %.loopexit
.Ltmp58:
.LBB2_25: # %_ZNSt6vectorIfSaIfEED2Ev.exit62
movq %rax, %r15
movq %r14, %rbx
.LBB2_99: # %_ZNSt6vectorIfSaIfEED2Ev.exit62
testq %r13, %r13
jne .LBB2_100
# %bb.101: # %_ZNSt6vectorIfSaIfEED2Ev.exit64
testq %rbx, %rbx
jne .LBB2_102
.LBB2_103: # %_ZNSt6vectorIfSaIfEED2Ev.exit66
.cfi_escape 0x2e, 0x00
movq %r15, %rdi
callq _Unwind_Resume@PLT
.LBB2_100:
.cfi_escape 0x2e, 0x00
movq %r13, %rdi
callq _ZdlPv
testq %rbx, %rbx
je .LBB2_103
.LBB2_102:
.cfi_escape 0x2e, 0x00
movq %rbx, %rdi
callq _ZdlPv
.cfi_escape 0x2e, 0x00
movq %r15, %rdi
callq _Unwind_Resume@PLT
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
.section .gcc_except_table,"a",@progbits
.p2align 2, 0x0
GCC_except_table2:
.Lexception0:
.byte 255 # @LPStart Encoding = omit
.byte 255 # @TType Encoding = omit
.byte 1 # Call site Encoding = uleb128
.uleb128 .Lcst_end0-.Lcst_begin0
.Lcst_begin0:
.uleb128 .Ltmp0-.Lfunc_begin0 # >> Call Site 1 <<
.uleb128 .Ltmp23-.Ltmp0 # Call between .Ltmp0 and .Ltmp23
.uleb128 .Ltmp71-.Lfunc_begin0 # jumps to .Ltmp71
.byte 0 # On action: cleanup
.uleb128 .Ltmp30-.Lfunc_begin0 # >> Call Site 2 <<
.uleb128 .Ltmp31-.Ltmp30 # Call between .Ltmp30 and .Ltmp31
.uleb128 .Ltmp32-.Lfunc_begin0 # jumps to .Ltmp32
.byte 0 # On action: cleanup
.uleb128 .Ltmp31-.Lfunc_begin0 # >> Call Site 3 <<
.uleb128 .Ltmp33-.Ltmp31 # Call between .Ltmp31 and .Ltmp33
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp33-.Lfunc_begin0 # >> Call Site 4 <<
.uleb128 .Ltmp34-.Ltmp33 # Call between .Ltmp33 and .Ltmp34
.uleb128 .Ltmp35-.Lfunc_begin0 # jumps to .Ltmp35
.byte 0 # On action: cleanup
.uleb128 .Ltmp34-.Lfunc_begin0 # >> Call Site 5 <<
.uleb128 .Ltmp36-.Ltmp34 # Call between .Ltmp34 and .Ltmp36
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp36-.Lfunc_begin0 # >> Call Site 6 <<
.uleb128 .Ltmp47-.Ltmp36 # Call between .Ltmp36 and .Ltmp47
.uleb128 .Ltmp71-.Lfunc_begin0 # jumps to .Ltmp71
.byte 0 # On action: cleanup
.uleb128 .Ltmp48-.Lfunc_begin0 # >> Call Site 7 <<
.uleb128 .Ltmp57-.Ltmp48 # Call between .Ltmp48 and .Ltmp57
.uleb128 .Ltmp58-.Lfunc_begin0 # jumps to .Ltmp58
.byte 0 # On action: cleanup
.uleb128 .Ltmp59-.Lfunc_begin0 # >> Call Site 8 <<
.uleb128 .Ltmp70-.Ltmp59 # Call between .Ltmp59 and .Ltmp70
.uleb128 .Ltmp71-.Lfunc_begin0 # jumps to .Ltmp71
.byte 0 # On action: cleanup
.uleb128 .Ltmp72-.Lfunc_begin0 # >> Call Site 9 <<
.uleb128 .Ltmp73-.Ltmp72 # Call between .Ltmp72 and .Ltmp73
.uleb128 .Ltmp74-.Lfunc_begin0 # jumps to .Ltmp74
.byte 0 # On action: cleanup
.uleb128 .Ltmp78-.Lfunc_begin0 # >> Call Site 10 <<
.uleb128 .Ltmp79-.Ltmp78 # Call between .Ltmp78 and .Ltmp79
.uleb128 .Ltmp80-.Lfunc_begin0 # jumps to .Ltmp80
.byte 0 # On action: cleanup
.uleb128 .Ltmp75-.Lfunc_begin0 # >> Call Site 11 <<
.uleb128 .Ltmp76-.Ltmp75 # Call between .Ltmp75 and .Ltmp76
.uleb128 .Ltmp77-.Lfunc_begin0 # jumps to .Ltmp77
.byte 0 # On action: cleanup
.uleb128 .Ltmp8-.Lfunc_begin0 # >> Call Site 12 <<
.uleb128 .Ltmp29-.Ltmp8 # Call between .Ltmp8 and .Ltmp29
.uleb128 .Ltmp71-.Lfunc_begin0 # jumps to .Ltmp71
.byte 0 # On action: cleanup
.uleb128 .Ltmp29-.Lfunc_begin0 # >> Call Site 13 <<
.uleb128 .Lfunc_end2-.Ltmp29 # Call between .Ltmp29 and .Lfunc_end2
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.Lcst_end0:
.p2align 2, 0x0
# -- End function
.text
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3addPfS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3addPfS_S_,@object # @_Z3addPfS_S_
.section .rodata,"a",@progbits
.globl _Z3addPfS_S_
.p2align 3, 0x0
_Z3addPfS_S_:
.quad _Z18__device_stub__addPfS_S_
.size _Z3addPfS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Error could not allocate memory result "
.size .L.str, 40
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "vector::_M_realloc_insert"
.size .L.str.1, 26
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3addPfS_S_"
.size .L__unnamed_1, 13
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__addPfS_S_
.addrsig_sym __gxx_personality_v0
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Unwind_Resume
.addrsig_sym _Z3addPfS_S_
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void axpy(float a, float* x, float* y) {
// RUN: sh -c "test `grep -c -F 'y[hipThreadIdx_x] = a * x[hipThreadIdx_x];' %t` -eq 2"
y[threadIdx.x] = a * x[threadIdx.x];
} | code for sm_80
Function : _Z4axpyfPfS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE.U32 R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x001fcc00078e0005 */
/*0050*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*0060*/ IMAD.WIDE.U32 R4, R4, R5, c[0x0][0x170] ; /* 0x00005c0004047625 */
/* 0x000fc800078e0005 */
/*0070*/ FMUL R7, R2, c[0x0][0x160] ; /* 0x0000580002077a20 */
/* 0x004fca0000400000 */
/*0080*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*0090*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00a0*/ BRA 0xa0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void axpy(float a, float* x, float* y) {
// RUN: sh -c "test `grep -c -F 'y[hipThreadIdx_x] = a * x[hipThreadIdx_x];' %t` -eq 2"
y[threadIdx.x] = a * x[threadIdx.x];
} | .file "tmpxft_00028450_00000000-6_axpy.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z26__device_stub__Z4axpyfPfS_fPfS_
.type _Z26__device_stub__Z4axpyfPfS_fPfS_, @function
_Z26__device_stub__Z4axpyfPfS_fPfS_:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movss %xmm0, 28(%rsp)
movq %rdi, 16(%rsp)
movq %rsi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z4axpyfPfS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z26__device_stub__Z4axpyfPfS_fPfS_, .-_Z26__device_stub__Z4axpyfPfS_fPfS_
.globl _Z4axpyfPfS_
.type _Z4axpyfPfS_, @function
_Z4axpyfPfS_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z4axpyfPfS_fPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z4axpyfPfS_, .-_Z4axpyfPfS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z4axpyfPfS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z4axpyfPfS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void axpy(float a, float* x, float* y) {
// RUN: sh -c "test `grep -c -F 'y[hipThreadIdx_x] = a * x[hipThreadIdx_x];' %t` -eq 2"
y[threadIdx.x] = a * x[threadIdx.x];
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void axpy(float a, float* x, float* y) {
// RUN: sh -c "test `grep -c -F 'y[hipThreadIdx_x] = a * x[hipThreadIdx_x];' %t` -eq 2"
y[threadIdx.x] = a * x[threadIdx.x];
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void axpy(float a, float* x, float* y) {
// RUN: sh -c "test `grep -c -F 'y[hipThreadIdx_x] = a * x[hipThreadIdx_x];' %t` -eq 2"
y[threadIdx.x] = a * x[threadIdx.x];
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z4axpyfPfS_
.globl _Z4axpyfPfS_
.p2align 8
.type _Z4axpyfPfS_,@function
_Z4axpyfPfS_:
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x8
s_load_b32 s0, s[0:1], 0x0
v_lshlrev_b32_e32 v0, 2, v0
s_waitcnt lgkmcnt(0)
global_load_b32 v1, v0, s[4:5]
s_waitcnt vmcnt(0)
v_mul_f32_e32 v1, s0, v1
global_store_b32 v0, v1, s[6:7]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4axpyfPfS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z4axpyfPfS_, .Lfunc_end0-_Z4axpyfPfS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4axpyfPfS_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z4axpyfPfS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void axpy(float a, float* x, float* y) {
// RUN: sh -c "test `grep -c -F 'y[hipThreadIdx_x] = a * x[hipThreadIdx_x];' %t` -eq 2"
y[threadIdx.x] = a * x[threadIdx.x];
} | .text
.file "axpy.hip"
.globl _Z19__device_stub__axpyfPfS_ # -- Begin function _Z19__device_stub__axpyfPfS_
.p2align 4, 0x90
.type _Z19__device_stub__axpyfPfS_,@function
_Z19__device_stub__axpyfPfS_: # @_Z19__device_stub__axpyfPfS_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movss %xmm0, 12(%rsp)
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z4axpyfPfS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z19__device_stub__axpyfPfS_, .Lfunc_end0-_Z19__device_stub__axpyfPfS_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4axpyfPfS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z4axpyfPfS_,@object # @_Z4axpyfPfS_
.section .rodata,"a",@progbits
.globl _Z4axpyfPfS_
.p2align 3, 0x0
_Z4axpyfPfS_:
.quad _Z19__device_stub__axpyfPfS_
.size _Z4axpyfPfS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z4axpyfPfS_"
.size .L__unnamed_1, 13
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z19__device_stub__axpyfPfS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z4axpyfPfS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z4axpyfPfS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE.U32 R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x001fcc00078e0005 */
/*0050*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*0060*/ IMAD.WIDE.U32 R4, R4, R5, c[0x0][0x170] ; /* 0x00005c0004047625 */
/* 0x000fc800078e0005 */
/*0070*/ FMUL R7, R2, c[0x0][0x160] ; /* 0x0000580002077a20 */
/* 0x004fca0000400000 */
/*0080*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*0090*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00a0*/ BRA 0xa0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z4axpyfPfS_
.globl _Z4axpyfPfS_
.p2align 8
.type _Z4axpyfPfS_,@function
_Z4axpyfPfS_:
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x8
s_load_b32 s0, s[0:1], 0x0
v_lshlrev_b32_e32 v0, 2, v0
s_waitcnt lgkmcnt(0)
global_load_b32 v1, v0, s[4:5]
s_waitcnt vmcnt(0)
v_mul_f32_e32 v1, s0, v1
global_store_b32 v0, v1, s[6:7]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4axpyfPfS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z4axpyfPfS_, .Lfunc_end0-_Z4axpyfPfS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4axpyfPfS_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z4axpyfPfS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00028450_00000000-6_axpy.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z26__device_stub__Z4axpyfPfS_fPfS_
.type _Z26__device_stub__Z4axpyfPfS_fPfS_, @function
_Z26__device_stub__Z4axpyfPfS_fPfS_:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movss %xmm0, 28(%rsp)
movq %rdi, 16(%rsp)
movq %rsi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z4axpyfPfS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z26__device_stub__Z4axpyfPfS_fPfS_, .-_Z26__device_stub__Z4axpyfPfS_fPfS_
.globl _Z4axpyfPfS_
.type _Z4axpyfPfS_, @function
_Z4axpyfPfS_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z4axpyfPfS_fPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z4axpyfPfS_, .-_Z4axpyfPfS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z4axpyfPfS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z4axpyfPfS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "axpy.hip"
.globl _Z19__device_stub__axpyfPfS_ # -- Begin function _Z19__device_stub__axpyfPfS_
.p2align 4, 0x90
.type _Z19__device_stub__axpyfPfS_,@function
_Z19__device_stub__axpyfPfS_: # @_Z19__device_stub__axpyfPfS_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movss %xmm0, 12(%rsp)
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z4axpyfPfS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z19__device_stub__axpyfPfS_, .Lfunc_end0-_Z19__device_stub__axpyfPfS_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4axpyfPfS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z4axpyfPfS_,@object # @_Z4axpyfPfS_
.section .rodata,"a",@progbits
.globl _Z4axpyfPfS_
.p2align 3, 0x0
_Z4axpyfPfS_:
.quad _Z19__device_stub__axpyfPfS_
.size _Z4axpyfPfS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z4axpyfPfS_"
.size .L__unnamed_1, 13
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z19__device_stub__axpyfPfS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z4axpyfPfS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <cuda_runtime.h>
const int CachDim = 64;
__global__ void boxBlurSharedKernel(uchar4* pDataIn, uchar4* pDataOut,
int width, int height, int borderSize)
{
// get the position for the current thread
const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
// calculate the memory adress
const int tid = y * width + x;
__shared__ uchar4 cache[CachDim][CachDim];
uchar4 value = {127, 127, 127, 255};
int meanLength = 2 * borderSize + 1;
int denom = meanLength * meanLength;
for(int cy = threadIdx.y; cy < blockDim.y + meanLength; cy += blockDim.y)
for(int cx = threadIdx.x; cx < blockDim.x + meanLength; cx += blockDim.x)
{
int tmpX = (blockIdx.x * blockDim.x) + cx - borderSize;
int tmpY = (blockIdx.y * blockDim.y) + cy - borderSize;
if(tmpX >= 0 && tmpX < width
&& tmpY >= 0 && tmpY < height)
{
cache[cy][cx] = pDataIn[tmpY * width + tmpX];
}
}
__syncthreads();
if(x >= borderSize && y >= borderSize
&& x + borderSize < width && y + borderSize < height)
{
int3 sum = {0, 0, 0};
for(int cy = 0; cy < meanLength; cy++)
for(int cx = 0; cx < meanLength; cx++)
{
uchar4 locValue = cache[threadIdx.y + cy][threadIdx.x + cx];
sum.x += locValue.x;
sum.y += locValue.y;
sum.z += locValue.z;
}
value.x = (unsigned char)(sum.x / denom);
value.y = (unsigned char)(sum.y / denom);
value.z = (unsigned char)(sum.z / denom);
}
if(x < width && y < height)
{
// write the value back to the global memory
pDataOut[tid] = value;
}
}
void boxBlurShared(uchar4* pDataIn, uchar4* pDataOut,
int width, int height, int blurSize, int blockDimX, int blockDimY)
{
// allocate device memory
unsigned int mem_size = sizeof(uchar4) * width * height;
uchar4* pDevDataIn;
uchar4* pDevDataOut;
cudaError_t res;
res = cudaMalloc((void **) &pDevDataIn, mem_size);
res = cudaMalloc((void **) &pDevDataOut, mem_size);
// copy results from host to device
res = cudaMemcpy(pDevDataIn, pDataIn, mem_size, cudaMemcpyHostToDevice);
// define partitioning
dim3 threadsPerBlock(blockDimX, blockDimY);
dim3 numBlocks(width / threadsPerBlock.x + 1, height / threadsPerBlock.y + 1);
// run the cuda kernel
boxBlurSharedKernel<<<numBlocks, threadsPerBlock>>>(pDevDataIn, pDevDataOut,
width, height, blurSize);
// copy results from device to host
res = cudaMemcpy(pDataOut, pDevDataOut, mem_size, cudaMemcpyDeviceToHost);
// cleanup memory
res = cudaFree(pDevDataIn);
res = cudaFree(pDevDataOut);
} | code for sm_80
Function : _Z19boxBlurSharedKernelP6uchar4S0_iii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e220000002200 */
/*0020*/ IMAD.MOV.U32 R10, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff0a7624 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*0040*/ BSSY B0, 0x2e0 ; /* 0x0000029000007945 */
/* 0x000fe20003800000 */
/*0050*/ S2R R7, SR_CTAID.X ; /* 0x0000000000077919 */
/* 0x000e620000002500 */
/*0060*/ IMAD.SHL.U32 R15, R10, 0x2, RZ ; /* 0x000000020a0f7824 */
/* 0x000fc600078e00ff */
/*0070*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */
/* 0x000e640000002100 */
/*0080*/ IADD3 R5, R15, 0x1, RZ ; /* 0x000000010f057810 */
/* 0x000fe40007ffe0ff */
/*0090*/ S2R R11, SR_CTAID.Y ; /* 0x00000000000b7919 */
/* 0x000ea40000002600 */
/*00a0*/ IADD3 R9, R5, c[0x0][0x4], RZ ; /* 0x0000010005097a10 */
/* 0x000fc80007ffe0ff */
/*00b0*/ ISETP.GE.U32.AND P0, PT, R2, R9, PT ; /* 0x000000090200720c */
/* 0x001fe20003f06070 */
/*00c0*/ IMAD R0, R7, c[0x0][0x0], R4 ; /* 0x0000000007007a24 */
/* 0x002fe400078e0204 */
/*00d0*/ IMAD R3, R11, c[0x0][0x4], R2 ; /* 0x000001000b037a24 */
/* 0x004fd400078e0202 */
/*00e0*/ @P0 BRA 0x2d0 ; /* 0x000001e000000947 */
/* 0x000fea0003800000 */
/*00f0*/ IMAD R8, R7, c[0x0][0x0], -R10.reuse ; /* 0x0000000007087a24 */
/* 0x100fe400078e0a0a */
/*0100*/ IMAD R10, R11, c[0x0][0x4], -R10 ; /* 0x000001000b0a7a24 */
/* 0x000fe200078e0a0a */
/*0110*/ IADD3.X R11, R15, c[0x0][0x0], RZ, PT, !PT ; /* 0x000000000f0b7a10 */
/* 0x000fe20003ffe4ff */
/*0120*/ IMAD.MOV.U32 R12, RZ, RZ, R2 ; /* 0x000000ffff0c7224 */
/* 0x000fc600078e0002 */
/*0130*/ ISETP.GE.U32.AND P0, PT, R4, R11, PT ; /* 0x0000000b0400720c */
/* 0x000fe20003f06070 */
/*0140*/ BSSY B1, 0x2a0 ; /* 0x0000015000017945 */
/* 0x000fd80003800000 */
/*0150*/ @P0 BRA 0x290 ; /* 0x0000013000000947 */
/* 0x001fea0003800000 */
/*0160*/ IMAD.IADD R14, R10, 0x1, R12 ; /* 0x000000010a0e7824 */
/* 0x000fe400078e020c */
/*0170*/ IMAD.MOV.U32 R13, RZ, RZ, R4 ; /* 0x000000ffff0d7224 */
/* 0x000fc800078e0004 */
/*0180*/ IMAD.IADD R17, R8, 0x1, R13 ; /* 0x0000000108117824 */
/* 0x001fe200078e020d */
/*0190*/ BSSY B2, 0x260 ; /* 0x000000c000027945 */
/* 0x000fe80003800000 */
/*01a0*/ LOP3.LUT R6, R17, R14, RZ, 0xfc, !PT ; /* 0x0000000e11067212 */
/* 0x000fc800078efcff */
/*01b0*/ ISETP.GE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fc80003f06270 */
/*01c0*/ ISETP.GE.OR P0, PT, R17, c[0x0][0x170], !P0 ; /* 0x00005c0011007a0c */
/* 0x000fc80004706670 */
/*01d0*/ ISETP.GE.OR P0, PT, R14, c[0x0][0x174], P0 ; /* 0x00005d000e007a0c */
/* 0x000fda0000706670 */
/*01e0*/ @P0 BRA 0x250 ; /* 0x0000006000000947 */
/* 0x000fea0003800000 */
/*01f0*/ IMAD.MOV.U32 R7, RZ, RZ, 0x4 ; /* 0x00000004ff077424 */
/* 0x000fe400078e00ff */
/*0200*/ IMAD R6, R14, c[0x0][0x170], R17 ; /* 0x00005c000e067a24 */
/* 0x000fc800078e0211 */
/*0210*/ IMAD.WIDE R6, R6, R7, c[0x0][0x160] ; /* 0x0000580006067625 */
/* 0x000fcc00078e0207 */
/*0220*/ LDG.E R6, [R6.64] ; /* 0x0000000606067981 */
/* 0x000ea2000c1e1900 */
/*0230*/ IMAD R17, R12, 0x40, R13 ; /* 0x000000400c117824 */
/* 0x000fca00078e020d */
/*0240*/ STS [R17.X4], R6 ; /* 0x0000000611007388 */
/* 0x0041e40000004800 */
/*0250*/ BSYNC B2 ; /* 0x0000000000027941 */
/* 0x000fea0003800000 */
/*0260*/ IADD3 R13, R13, c[0x0][0x0], RZ ; /* 0x000000000d0d7a10 */
/* 0x000fc80007ffe0ff */
/*0270*/ ISETP.GE.U32.AND P0, PT, R13, R11, PT ; /* 0x0000000b0d00720c */
/* 0x000fda0003f06070 */
/*0280*/ @!P0 BRA 0x180 ; /* 0xfffffef000008947 */
/* 0x000fea000383ffff */
/*0290*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*02a0*/ IADD3 R12, R12, c[0x0][0x4], RZ ; /* 0x000001000c0c7a10 */
/* 0x000fc80007ffe0ff */
/*02b0*/ ISETP.GE.U32.AND P0, PT, R12, R9, PT ; /* 0x000000090c00720c */
/* 0x000fda0003f06070 */
/*02c0*/ @!P0 BRA 0x130 ; /* 0xfffffe6000008947 */
/* 0x000fea000383ffff */
/*02d0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*02e0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*02f0*/ ISETP.GE.AND P0, PT, R0.reuse, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */
/* 0x040fe20003f06270 */
/*0300*/ IMAD.MOV.U32 R8, RZ, RZ, 0x7f ; /* 0x0000007fff087424 */
/* 0x000fe200078e00ff */
/*0310*/ IADD3 R6, R0, c[0x0][0x178], RZ ; /* 0x00005e0000067a10 */
/* 0x001fe40007ffe0ff */
/*0320*/ ISETP.LT.OR P0, PT, R3.reuse, c[0x0][0x178], !P0 ; /* 0x00005e0003007a0c */
/* 0x040fe20004701670 */
/*0330*/ BSSY B0, 0xb70 ; /* 0x0000083000007945 */
/* 0x000fe20003800000 */
/*0340*/ IADD3 R7, R3, c[0x0][0x178], RZ ; /* 0x00005e0003077a10 */
/* 0x000fe40007ffe0ff */
/*0350*/ ISETP.GE.OR P0, PT, R6, c[0x0][0x170], P0 ; /* 0x00005c0006007a0c */
/* 0x000fe20000706670 */
/*0360*/ IMAD.MOV.U32 R6, RZ, RZ, 0x7f ; /* 0x0000007fff067424 */
/* 0x000fc600078e00ff */
/*0370*/ ISETP.GE.OR P0, PT, R7, c[0x0][0x174], P0 ; /* 0x00005d0007007a0c */
/* 0x000fe20000706670 */
/*0380*/ IMAD.MOV.U32 R7, RZ, RZ, 0x7f ; /* 0x0000007fff077424 */
/* 0x000fd800078e00ff */
/*0390*/ @P0 BRA 0xb60 ; /* 0x000007c000000947 */
/* 0x000fea0003800000 */
/*03a0*/ ISETP.LE.AND P0, PT, RZ, c[0x0][0x178], PT ; /* 0x00005e00ff007a0c */
/* 0x000fe20003f03270 */
/*03b0*/ IMAD.MOV.U32 R8, RZ, RZ, RZ ; /* 0x000000ffff087224 */
/* 0x000fe200078e00ff */
/*03c0*/ CS2R R6, SRZ ; /* 0x0000000000067805 */
/* 0x000fd6000001ff00 */
/*03d0*/ @!P0 BRA 0x810 ; /* 0x0000043000008947 */
/* 0x000fea0003800000 */
/*03e0*/ IMNMX R22, RZ, R15, !PT ; /* 0x0000000fff167217 */
/* 0x000fe20007800200 */
/*03f0*/ CS2R R6, SRZ ; /* 0x0000000000067805 */
/* 0x000fe2000001ff00 */
/*0400*/ IMAD.MOV.U32 R8, RZ, RZ, RZ ; /* 0x000000ffff087224 */
/* 0x000fe200078e00ff */
/*0410*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fe20008000000 */
/*0420*/ IADD3 R10, R22, 0x1, RZ ; /* 0x00000001160a7810 */
/* 0x000fc80007ffe0ff */
/*0430*/ LOP3.LUT R9, R10, 0x3, RZ, 0xc0, !PT ; /* 0x000000030a097812 */
/* 0x000fca00078ec0ff */
/*0440*/ IMAD.IADD R11, R10, 0x1, -R9 ; /* 0x000000010a0b7824 */
/* 0x000fe400078e0a09 */
/*0450*/ ISETP.GE.U32.AND P0, PT, R22, 0x3, PT ; /* 0x000000031600780c */
/* 0x000fe20003f06070 */
/*0460*/ UMOV UR5, URZ ; /* 0x0000003f00057c82 */
/* 0x000fe20008000000 */
/*0470*/ IADD3 R12, R2, UR4, RZ ; /* 0x00000004020c7c10 */
/* 0x000fd6000fffe0ff */
/*0480*/ @!P0 BRA 0x670 ; /* 0x000001e000008947 */
/* 0x000fea0003800000 */
/*0490*/ IMAD.MOV.U32 R10, RZ, RZ, R11 ; /* 0x000000ffff0a7224 */
/* 0x000fe200078e000b */
/*04a0*/ UMOV UR5, URZ ; /* 0x0000003f00057c82 */
/* 0x000fe40008000000 */
/*04b0*/ IADD3 R13, R4, UR5, RZ ; /* 0x00000005040d7c10 */
/* 0x000fe2000fffe0ff */
/*04c0*/ UIADD3 UR5, UR5, 0x4, URZ ; /* 0x0000000405057890 */
/* 0x000fe2000fffe03f */
/*04d0*/ IADD3 R10, R10, -0x4, RZ ; /* 0xfffffffc0a0a7810 */
/* 0x000fc60007ffe0ff */
/*04e0*/ IMAD R23, R12, 0x40, R13 ; /* 0x000000400c177824 */
/* 0x000fe200078e020d */
/*04f0*/ ISETP.NE.AND P0, PT, R10, RZ, PT ; /* 0x000000ff0a00720c */
/* 0x000fc80003f05270 */
/*0500*/ LDS R16, [R23.X4] ; /* 0x0000000017107984 */
/* 0x000e280000004800 */
/*0510*/ LDS R21, [R23.X4+0x4] ; /* 0x0000040017157984 */
/* 0x000e680000004800 */
/*0520*/ LDS R14, [R23.X4+0x8] ; /* 0x00000800170e7984 */
/* 0x000ea80000004800 */
/*0530*/ LDS R13, [R23.X4+0xc] ; /* 0x00000c00170d7984 */
/* 0x000ee20000004800 */
/*0540*/ PRMT R17, R16, 0x7770, RZ ; /* 0x0000777010117816 */
/* 0x001fc400000000ff */
/*0550*/ PRMT R19, R16.reuse, 0x7771, RZ ; /* 0x0000777110137816 */
/* 0x040fe400000000ff */
/*0560*/ PRMT R18, R21.reuse, 0x7770, RZ ; /* 0x0000777015127816 */
/* 0x042fe400000000ff */
/*0570*/ PRMT R20, R21.reuse, 0x7771, RZ ; /* 0x0000777115147816 */
/* 0x040fe400000000ff */
/*0580*/ PRMT R16, R16, 0x7772, RZ ; /* 0x0000777210107816 */
/* 0x000fe400000000ff */
/*0590*/ PRMT R21, R21, 0x7772, RZ ; /* 0x0000777215157816 */
/* 0x000fe400000000ff */
/*05a0*/ IADD3 R6, R18, R6, R17 ; /* 0x0000000612067210 */
/* 0x000fc40007ffe011 */
/*05b0*/ IADD3 R8, R20, R8, R19 ; /* 0x0000000814087210 */
/* 0x000fe40007ffe013 */
/*05c0*/ IADD3 R7, R21, R7, R16 ; /* 0x0000000715077210 */
/* 0x000fe40007ffe010 */
/*05d0*/ PRMT R17, R14.reuse, 0x7770, RZ ; /* 0x000077700e117816 */
/* 0x044fe400000000ff */
/*05e0*/ PRMT R19, R14, 0x7771, RZ ; /* 0x000077710e137816 */
/* 0x000fe400000000ff */
/*05f0*/ PRMT R16, R13.reuse, 0x7770, RZ ; /* 0x000077700d107816 */
/* 0x048fe400000000ff */
/*0600*/ PRMT R18, R13, 0x7771, RZ ; /* 0x000077710d127816 */
/* 0x000fc400000000ff */
/*0610*/ PRMT R14, R14, 0x7772, RZ ; /* 0x000077720e0e7816 */
/* 0x000fe400000000ff */
/*0620*/ PRMT R13, R13, 0x7772, RZ ; /* 0x000077720d0d7816 */
/* 0x000fe400000000ff */
/*0630*/ IADD3 R6, R16, R6, R17 ; /* 0x0000000610067210 */
/* 0x000fe40007ffe011 */
/*0640*/ IADD3 R8, R18, R8, R19 ; /* 0x0000000812087210 */
/* 0x000fe40007ffe013 */
/*0650*/ IADD3 R7, R13, R7, R14 ; /* 0x000000070d077210 */
/* 0x000fe20007ffe00e */
/*0660*/ @P0 BRA 0x4b0 ; /* 0xfffffe4000000947 */
/* 0x000fea000383ffff */
/*0670*/ IADD3 R13, R4, UR5, RZ ; /* 0x00000005040d7c10 */
/* 0x000fe4000fffe0ff */
/*0680*/ ISETP.NE.AND P0, PT, R9, 0x1, PT ; /* 0x000000010900780c */
/* 0x000fc40003f05270 */
/*0690*/ ISETP.LE.AND P1, PT, R15, UR4, PT ; /* 0x000000040f007c0c */
/* 0x000fe2000bf23270 */
/*06a0*/ IMAD R13, R12, 0x40, R13 ; /* 0x000000400c0d7824 */
/* 0x000fe200078e020d */
/*06b0*/ UIADD3 UR4, UR4, 0x1, URZ ; /* 0x0000000104047890 */
/* 0x000fc6000fffe03f */
/*06c0*/ IMAD.SHL.U32 R18, R13, 0x4, RZ ; /* 0x000000040d127824 */
/* 0x000fca00078e00ff */
/*06d0*/ LDS R10, [R18] ; /* 0x00000000120a7984 */
/* 0x000e240000000800 */
/*06e0*/ PRMT R13, R10.reuse, 0x7770, RZ ; /* 0x000077700a0d7816 */
/* 0x041fe400000000ff */
/*06f0*/ PRMT R17, R10.reuse, 0x7771, RZ ; /* 0x000077710a117816 */
/* 0x040fe400000000ff */
/*0700*/ PRMT R10, R10, 0x7772, RZ ; /* 0x000077720a0a7816 */
/* 0x000fe200000000ff */
/*0710*/ IMAD.IADD R6, R13, 0x1, R6 ; /* 0x000000010d067824 */
/* 0x000fe400078e0206 */
/*0720*/ IMAD.IADD R8, R17, 0x1, R8 ; /* 0x0000000111087824 */
/* 0x000fe400078e0208 */
/*0730*/ IMAD.IADD R7, R10, 0x1, R7 ; /* 0x000000010a077824 */
/* 0x000fe200078e0207 */
/*0740*/ @!P0 BRA 0x800 ; /* 0x000000b000008947 */
/* 0x000fea0003800000 */
/*0750*/ LDS R10, [R18+0x4] ; /* 0x00000400120a7984 */
/* 0x000e280000000800 */
/*0760*/ LDS R16, [R18+0x8] ; /* 0x0000080012107984 */
/* 0x000e620000000800 */
/*0770*/ PRMT R13, R10, 0x7770, RZ ; /* 0x000077700a0d7816 */
/* 0x001fc400000000ff */
/*0780*/ PRMT R17, R10, 0x7771, RZ ; /* 0x000077710a117816 */
/* 0x000fe400000000ff */
/*0790*/ PRMT R12, R16.reuse, 0x7770, RZ ; /* 0x00007770100c7816 */
/* 0x042fe400000000ff */
/*07a0*/ PRMT R14, R16, 0x7771, RZ ; /* 0x00007771100e7816 */
/* 0x000fe400000000ff */
/*07b0*/ PRMT R10, R10, 0x7772, RZ ; /* 0x000077720a0a7816 */
/* 0x000fe400000000ff */
/*07c0*/ PRMT R16, R16, 0x7772, RZ ; /* 0x0000777210107816 */
/* 0x000fe400000000ff */
/*07d0*/ IADD3 R6, R12, R6, R13 ; /* 0x000000060c067210 */
/* 0x000fc40007ffe00d */
/*07e0*/ IADD3 R8, R14, R8, R17 ; /* 0x000000080e087210 */
/* 0x000fe40007ffe011 */
/*07f0*/ IADD3 R7, R16, R7, R10 ; /* 0x0000000710077210 */
/* 0x000fe40007ffe00a */
/*0800*/ @!P1 BRA 0x450 ; /* 0xfffffc4000009947 */
/* 0x000fea000383ffff */
/*0810*/ IMAD R5, R5, R5, RZ ; /* 0x0000000505057224 */
/* 0x000fe200078e02ff */
/*0820*/ IABS R12, R6 ; /* 0x00000006000c7213 */
/* 0x000fe40000000000 */
/*0830*/ IABS R14, R8 ; /* 0x00000008000e7213 */
/* 0x000fe40000000000 */
/*0840*/ IABS R2, R5.reuse ; /* 0x0000000500027213 */
/* 0x080fe40000000000 */
/*0850*/ IABS R13, R5 ; /* 0x00000005000d7213 */
/* 0x000fe40000000000 */
/*0860*/ I2F.RP R4, R2 ; /* 0x0000000200047306 */
/* 0x000e220000209400 */
/*0870*/ IABS R16, R7 ; /* 0x0000000700107213 */
/* 0x000fc40000000000 */
/*0880*/ IMAD.MOV R13, RZ, RZ, -R13 ; /* 0x000000ffff0d7224 */
/* 0x000fe200078e0a0d */
/*0890*/ LOP3.LUT R6, R6, R5.reuse, RZ, 0x3c, !PT ; /* 0x0000000506067212 */
/* 0x080fe400078e3cff */
/*08a0*/ LOP3.LUT R8, R8, R5.reuse, RZ, 0x3c, !PT ; /* 0x0000000508087212 */
/* 0x080fe400078e3cff */
/*08b0*/ LOP3.LUT R7, R7, R5, RZ, 0x3c, !PT ; /* 0x0000000507077212 */
/* 0x000fe400078e3cff */
/*08c0*/ ISETP.GE.AND P4, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe20003f86270 */
/*08d0*/ MUFU.RCP R4, R4 ; /* 0x0000000400047308 */
/* 0x001e240000001000 */
/*08e0*/ IADD3 R10, R4, 0xffffffe, RZ ; /* 0x0ffffffe040a7810 */
/* 0x001fcc0007ffe0ff */
/*08f0*/ F2I.FTZ.U32.TRUNC.NTZ R11, R10 ; /* 0x0000000a000b7305 */
/* 0x000064000021f000 */
/*0900*/ IMAD.MOV.U32 R10, RZ, RZ, RZ ; /* 0x000000ffff0a7224 */
/* 0x001fe400078e00ff */
/*0910*/ IMAD.MOV R9, RZ, RZ, -R11 ; /* 0x000000ffff097224 */
/* 0x002fc800078e0a0b */
/*0920*/ IMAD R9, R9, R2, RZ ; /* 0x0000000209097224 */
/* 0x000fc800078e02ff */
/*0930*/ IMAD.HI.U32 R11, R11, R9, R10 ; /* 0x000000090b0b7227 */
/* 0x000fc800078e000a */
/*0940*/ IMAD.MOV.U32 R9, RZ, RZ, R12 ; /* 0x000000ffff097224 */
/* 0x000fe400078e000c */
/*0950*/ IMAD.HI.U32 R10, R11, R14, RZ ; /* 0x0000000e0b0a7227 */
/* 0x000fc800078e00ff */
/*0960*/ IMAD.HI.U32 R4, R11, R9, RZ ; /* 0x000000090b047227 */
/* 0x000fc800078e00ff */
/*0970*/ IMAD.HI.U32 R12, R11, R16, RZ ; /* 0x000000100b0c7227 */
/* 0x000fc800078e00ff */
/*0980*/ IMAD R9, R4, R13.reuse, R9 ; /* 0x0000000d04097224 */
/* 0x080fe400078e0209 */
/*0990*/ IMAD R11, R10, R13.reuse, R14 ; /* 0x0000000d0a0b7224 */
/* 0x080fe400078e020e */
/*09a0*/ IMAD R13, R12, R13, R16 ; /* 0x0000000d0c0d7224 */
/* 0x000fe200078e0210 */
/*09b0*/ ISETP.GT.U32.AND P5, PT, R2.reuse, R9, PT ; /* 0x000000090200720c */
/* 0x040fe40003fa4070 */
/*09c0*/ ISETP.GT.U32.AND P6, PT, R2.reuse, R11, PT ; /* 0x0000000b0200720c */
/* 0x040fe40003fc4070 */
/*09d0*/ ISETP.GT.U32.AND P0, PT, R2, R13, PT ; /* 0x0000000d0200720c */
/* 0x000fd20003f04070 */
/*09e0*/ @!P5 IMAD.IADD R9, R9, 0x1, -R2.reuse ; /* 0x000000010909d824 */
/* 0x100fe200078e0a02 */
/*09f0*/ @!P5 IADD3 R4, R4, 0x1, RZ ; /* 0x000000010404d810 */
/* 0x000fe20007ffe0ff */
/*0a00*/ @!P6 IMAD.IADD R11, R11, 0x1, -R2.reuse ; /* 0x000000010b0be824 */
/* 0x100fe200078e0a02 */
/*0a10*/ ISETP.GE.AND P5, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe20003fa6270 */
/*0a20*/ @!P0 IMAD.IADD R13, R13, 0x1, -R2 ; /* 0x000000010d0d8824 */
/* 0x000fe200078e0a02 */
/*0a30*/ ISETP.GE.U32.AND P1, PT, R9, R2.reuse, PT ; /* 0x000000020900720c */
/* 0x080fe40003f26070 */
/*0a40*/ ISETP.GE.U32.AND P2, PT, R11, R2.reuse, PT ; /* 0x000000020b00720c */
/* 0x080fe40003f46070 */
/*0a50*/ ISETP.GE.U32.AND P3, PT, R13, R2, PT ; /* 0x000000020d00720c */
/* 0x000fe40003f66070 */
/*0a60*/ @!P6 IADD3 R10, R10, 0x1, RZ ; /* 0x000000010a0ae810 */
/* 0x000fc40007ffe0ff */
/*0a70*/ ISETP.GE.AND P6, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fe40003fc6270 */
/*0a80*/ @!P0 IADD3 R12, R12, 0x1, RZ ; /* 0x000000010c0c8810 */
/* 0x000fe40007ffe0ff */
/*0a90*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe40003f05270 */
/*0aa0*/ @P1 IADD3 R4, R4, 0x1, RZ ; /* 0x0000000104041810 */
/* 0x000fe40007ffe0ff */
/*0ab0*/ @P2 IADD3 R10, R10, 0x1, RZ ; /* 0x000000010a0a2810 */
/* 0x000fe40007ffe0ff */
/*0ac0*/ @P3 IADD3 R12, R12, 0x1, RZ ; /* 0x000000010c0c3810 */
/* 0x000fe20007ffe0ff */
/*0ad0*/ @!P4 IMAD.MOV R4, RZ, RZ, -R4 ; /* 0x000000ffff04c224 */
/* 0x000fe200078e0a04 */
/*0ae0*/ LOP3.LUT R5, RZ, R5, RZ, 0x33, !PT ; /* 0x00000005ff057212 */
/* 0x000fe200078e33ff */
/*0af0*/ @!P5 IMAD.MOV R10, RZ, RZ, -R10 ; /* 0x000000ffff0ad224 */
/* 0x000fc400078e0a0a */
/*0b00*/ @!P6 IMAD.MOV R12, RZ, RZ, -R12 ; /* 0x000000ffff0ce224 */
/* 0x000fe200078e0a0c */
/*0b10*/ SEL R4, R5.reuse, R4, !P0 ; /* 0x0000000405047207 */
/* 0x040fe40004000000 */
/*0b20*/ SEL R7, R5.reuse, R10, !P0 ; /* 0x0000000a05077207 */
/* 0x040fe40004000000 */
/*0b30*/ SEL R5, R5, R12, !P0 ; /* 0x0000000c05057207 */
/* 0x000fe40004000000 */
/*0b40*/ PRMT R6, R4, 0x7610, R6 ; /* 0x0000761004067816 */
/* 0x000fe40000000006 */
/*0b50*/ PRMT R8, R5, 0x7610, R8 ; /* 0x0000761005087816 */
/* 0x000fe40000000008 */
/*0b60*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0b70*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x174], PT ; /* 0x00005d0003007a0c */
/* 0x000fc80003f06270 */
/*0b80*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x170], P0 ; /* 0x00005c0000007a0c */
/* 0x000fda0000706670 */
/*0b90*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0ba0*/ PRMT R7, R7, 0x7604, R6 ; /* 0x0000760407077816 */
/* 0x000fe20000000006 */
/*0bb0*/ IMAD R3, R3, c[0x0][0x170], R0 ; /* 0x00005c0003037a24 */
/* 0x000fe400078e0200 */
/*0bc0*/ IMAD.MOV.U32 R0, RZ, RZ, 0xff ; /* 0x000000ffff007424 */
/* 0x000fe200078e00ff */
/*0bd0*/ PRMT R7, R8, 0x7054, R7 ; /* 0x0000705408077816 */
/* 0x000fe20000000007 */
/*0be0*/ IMAD.MOV.U32 R2, RZ, RZ, 0x4 ; /* 0x00000004ff027424 */
/* 0x000fc600078e00ff */
/*0bf0*/ PRMT R7, R0, 0x654, R7 ; /* 0x0000065400077816 */
/* 0x000fe20000000007 */
/*0c00*/ IMAD.WIDE R2, R3, R2, c[0x0][0x168] ; /* 0x00005a0003027625 */
/* 0x000fca00078e0202 */
/*0c10*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x000fe2000c101906 */
/*0c20*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0c30*/ BRA 0xc30; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0c40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ca0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ce0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cf0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cuda_runtime.h>
const int CachDim = 64;
__global__ void boxBlurSharedKernel(uchar4* pDataIn, uchar4* pDataOut,
int width, int height, int borderSize)
{
// get the position for the current thread
const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
// calculate the memory adress
const int tid = y * width + x;
__shared__ uchar4 cache[CachDim][CachDim];
uchar4 value = {127, 127, 127, 255};
int meanLength = 2 * borderSize + 1;
int denom = meanLength * meanLength;
for(int cy = threadIdx.y; cy < blockDim.y + meanLength; cy += blockDim.y)
for(int cx = threadIdx.x; cx < blockDim.x + meanLength; cx += blockDim.x)
{
int tmpX = (blockIdx.x * blockDim.x) + cx - borderSize;
int tmpY = (blockIdx.y * blockDim.y) + cy - borderSize;
if(tmpX >= 0 && tmpX < width
&& tmpY >= 0 && tmpY < height)
{
cache[cy][cx] = pDataIn[tmpY * width + tmpX];
}
}
__syncthreads();
if(x >= borderSize && y >= borderSize
&& x + borderSize < width && y + borderSize < height)
{
int3 sum = {0, 0, 0};
for(int cy = 0; cy < meanLength; cy++)
for(int cx = 0; cx < meanLength; cx++)
{
uchar4 locValue = cache[threadIdx.y + cy][threadIdx.x + cx];
sum.x += locValue.x;
sum.y += locValue.y;
sum.z += locValue.z;
}
value.x = (unsigned char)(sum.x / denom);
value.y = (unsigned char)(sum.y / denom);
value.z = (unsigned char)(sum.z / denom);
}
if(x < width && y < height)
{
// write the value back to the global memory
pDataOut[tid] = value;
}
}
void boxBlurShared(uchar4* pDataIn, uchar4* pDataOut,
int width, int height, int blurSize, int blockDimX, int blockDimY)
{
// allocate device memory
unsigned int mem_size = sizeof(uchar4) * width * height;
uchar4* pDevDataIn;
uchar4* pDevDataOut;
cudaError_t res;
res = cudaMalloc((void **) &pDevDataIn, mem_size);
res = cudaMalloc((void **) &pDevDataOut, mem_size);
// copy results from host to device
res = cudaMemcpy(pDevDataIn, pDataIn, mem_size, cudaMemcpyHostToDevice);
// define partitioning
dim3 threadsPerBlock(blockDimX, blockDimY);
dim3 numBlocks(width / threadsPerBlock.x + 1, height / threadsPerBlock.y + 1);
// run the cuda kernel
boxBlurSharedKernel<<<numBlocks, threadsPerBlock>>>(pDevDataIn, pDevDataOut,
width, height, blurSize);
// copy results from device to host
res = cudaMemcpy(pDataOut, pDevDataOut, mem_size, cudaMemcpyDeviceToHost);
// cleanup memory
res = cudaFree(pDevDataIn);
res = cudaFree(pDevDataOut);
} | .file "tmpxft_000f42e4_00000000-6_BoxBlurShared.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z51__device_stub__Z19boxBlurSharedKernelP6uchar4S0_iiiP6uchar4S0_iii
.type _Z51__device_stub__Z19boxBlurSharedKernelP6uchar4S0_iiiP6uchar4S0_iii, @function
_Z51__device_stub__Z19boxBlurSharedKernelP6uchar4S0_iiiP6uchar4S0_iii:
.LFB2052:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z19boxBlurSharedKernelP6uchar4S0_iii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z51__device_stub__Z19boxBlurSharedKernelP6uchar4S0_iiiP6uchar4S0_iii, .-_Z51__device_stub__Z19boxBlurSharedKernelP6uchar4S0_iiiP6uchar4S0_iii
.globl _Z19boxBlurSharedKernelP6uchar4S0_iii
.type _Z19boxBlurSharedKernelP6uchar4S0_iii, @function
_Z19boxBlurSharedKernelP6uchar4S0_iii:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z51__device_stub__Z19boxBlurSharedKernelP6uchar4S0_iiiP6uchar4S0_iii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z19boxBlurSharedKernelP6uchar4S0_iii, .-_Z19boxBlurSharedKernelP6uchar4S0_iii
.globl _Z13boxBlurSharedP6uchar4S0_iiiii
.type _Z13boxBlurSharedP6uchar4S0_iiiii, @function
_Z13boxBlurSharedP6uchar4S0_iiiii:
.LFB2027:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movq %rdi, %r15
movq %rsi, (%rsp)
movl %edx, %ebx
movl %ecx, %ebp
movl %r8d, 12(%rsp)
movl %r9d, %r12d
movl 128(%rsp), %r14d
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl %edx, %eax
imull %ecx, %eax
leal 0(,%rax,4), %r13d
leaq 16(%rsp), %rdi
movq %r13, %rsi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movq %r13, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %r13, %rdx
movq %r15, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl %ebx, %eax
movl $0, %edx
divl %r12d
addl $1, %eax
movl %eax, 44(%rsp)
movl %ebp, %eax
movl $0, %edx
divl %r14d
addl $1, %eax
movl %eax, 48(%rsp)
movl %r12d, 32(%rsp)
movl %r14d, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 32(%rsp), %rdx
movl $1, %ecx
movq 44(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
movl $2, %ecx
movq %r13, %rdx
movq 24(%rsp), %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
movl 12(%rsp), %r8d
movl %ebp, %ecx
movl %ebx, %edx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z51__device_stub__Z19boxBlurSharedKernelP6uchar4S0_iiiP6uchar4S0_iii
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2027:
.size _Z13boxBlurSharedP6uchar4S0_iiiii, .-_Z13boxBlurSharedP6uchar4S0_iiiii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z19boxBlurSharedKernelP6uchar4S0_iii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z19boxBlurSharedKernelP6uchar4S0_iii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cuda_runtime.h>
const int CachDim = 64;
__global__ void boxBlurSharedKernel(uchar4* pDataIn, uchar4* pDataOut,
int width, int height, int borderSize)
{
// get the position for the current thread
const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
// calculate the memory adress
const int tid = y * width + x;
__shared__ uchar4 cache[CachDim][CachDim];
uchar4 value = {127, 127, 127, 255};
int meanLength = 2 * borderSize + 1;
int denom = meanLength * meanLength;
for(int cy = threadIdx.y; cy < blockDim.y + meanLength; cy += blockDim.y)
for(int cx = threadIdx.x; cx < blockDim.x + meanLength; cx += blockDim.x)
{
int tmpX = (blockIdx.x * blockDim.x) + cx - borderSize;
int tmpY = (blockIdx.y * blockDim.y) + cy - borderSize;
if(tmpX >= 0 && tmpX < width
&& tmpY >= 0 && tmpY < height)
{
cache[cy][cx] = pDataIn[tmpY * width + tmpX];
}
}
__syncthreads();
if(x >= borderSize && y >= borderSize
&& x + borderSize < width && y + borderSize < height)
{
int3 sum = {0, 0, 0};
for(int cy = 0; cy < meanLength; cy++)
for(int cx = 0; cx < meanLength; cx++)
{
uchar4 locValue = cache[threadIdx.y + cy][threadIdx.x + cx];
sum.x += locValue.x;
sum.y += locValue.y;
sum.z += locValue.z;
}
value.x = (unsigned char)(sum.x / denom);
value.y = (unsigned char)(sum.y / denom);
value.z = (unsigned char)(sum.z / denom);
}
if(x < width && y < height)
{
// write the value back to the global memory
pDataOut[tid] = value;
}
}
void boxBlurShared(uchar4* pDataIn, uchar4* pDataOut,
int width, int height, int blurSize, int blockDimX, int blockDimY)
{
// allocate device memory
unsigned int mem_size = sizeof(uchar4) * width * height;
uchar4* pDevDataIn;
uchar4* pDevDataOut;
cudaError_t res;
res = cudaMalloc((void **) &pDevDataIn, mem_size);
res = cudaMalloc((void **) &pDevDataOut, mem_size);
// copy results from host to device
res = cudaMemcpy(pDevDataIn, pDataIn, mem_size, cudaMemcpyHostToDevice);
// define partitioning
dim3 threadsPerBlock(blockDimX, blockDimY);
dim3 numBlocks(width / threadsPerBlock.x + 1, height / threadsPerBlock.y + 1);
// run the cuda kernel
boxBlurSharedKernel<<<numBlocks, threadsPerBlock>>>(pDevDataIn, pDevDataOut,
width, height, blurSize);
// copy results from device to host
res = cudaMemcpy(pDataOut, pDevDataOut, mem_size, cudaMemcpyDeviceToHost);
// cleanup memory
res = cudaFree(pDevDataIn);
res = cudaFree(pDevDataOut);
} | #include <hip/hip_runtime.h>
const int CachDim = 64;
__global__ void boxBlurSharedKernel(uchar4* pDataIn, uchar4* pDataOut,
int width, int height, int borderSize)
{
// get the position for the current thread
const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
// calculate the memory adress
const int tid = y * width + x;
__shared__ uchar4 cache[CachDim][CachDim];
uchar4 value = {127, 127, 127, 255};
int meanLength = 2 * borderSize + 1;
int denom = meanLength * meanLength;
for(int cy = threadIdx.y; cy < blockDim.y + meanLength; cy += blockDim.y)
for(int cx = threadIdx.x; cx < blockDim.x + meanLength; cx += blockDim.x)
{
int tmpX = (blockIdx.x * blockDim.x) + cx - borderSize;
int tmpY = (blockIdx.y * blockDim.y) + cy - borderSize;
if(tmpX >= 0 && tmpX < width
&& tmpY >= 0 && tmpY < height)
{
cache[cy][cx] = pDataIn[tmpY * width + tmpX];
}
}
__syncthreads();
if(x >= borderSize && y >= borderSize
&& x + borderSize < width && y + borderSize < height)
{
int3 sum = {0, 0, 0};
for(int cy = 0; cy < meanLength; cy++)
for(int cx = 0; cx < meanLength; cx++)
{
uchar4 locValue = cache[threadIdx.y + cy][threadIdx.x + cx];
sum.x += locValue.x;
sum.y += locValue.y;
sum.z += locValue.z;
}
value.x = (unsigned char)(sum.x / denom);
value.y = (unsigned char)(sum.y / denom);
value.z = (unsigned char)(sum.z / denom);
}
if(x < width && y < height)
{
// write the value back to the global memory
pDataOut[tid] = value;
}
}
void boxBlurShared(uchar4* pDataIn, uchar4* pDataOut,
int width, int height, int blurSize, int blockDimX, int blockDimY)
{
// allocate device memory
unsigned int mem_size = sizeof(uchar4) * width * height;
uchar4* pDevDataIn;
uchar4* pDevDataOut;
hipError_t res;
res = hipMalloc((void **) &pDevDataIn, mem_size);
res = hipMalloc((void **) &pDevDataOut, mem_size);
// copy results from host to device
res = hipMemcpy(pDevDataIn, pDataIn, mem_size, hipMemcpyHostToDevice);
// define partitioning
dim3 threadsPerBlock(blockDimX, blockDimY);
dim3 numBlocks(width / threadsPerBlock.x + 1, height / threadsPerBlock.y + 1);
// run the cuda kernel
boxBlurSharedKernel<<<numBlocks, threadsPerBlock>>>(pDevDataIn, pDevDataOut,
width, height, blurSize);
// copy results from device to host
res = hipMemcpy(pDataOut, pDevDataOut, mem_size, hipMemcpyDeviceToHost);
// cleanup memory
res = hipFree(pDevDataIn);
res = hipFree(pDevDataOut);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
const int CachDim = 64;
__global__ void boxBlurSharedKernel(uchar4* pDataIn, uchar4* pDataOut,
int width, int height, int borderSize)
{
// get the position for the current thread
const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
// calculate the memory adress
const int tid = y * width + x;
__shared__ uchar4 cache[CachDim][CachDim];
uchar4 value = {127, 127, 127, 255};
int meanLength = 2 * borderSize + 1;
int denom = meanLength * meanLength;
for(int cy = threadIdx.y; cy < blockDim.y + meanLength; cy += blockDim.y)
for(int cx = threadIdx.x; cx < blockDim.x + meanLength; cx += blockDim.x)
{
int tmpX = (blockIdx.x * blockDim.x) + cx - borderSize;
int tmpY = (blockIdx.y * blockDim.y) + cy - borderSize;
if(tmpX >= 0 && tmpX < width
&& tmpY >= 0 && tmpY < height)
{
cache[cy][cx] = pDataIn[tmpY * width + tmpX];
}
}
__syncthreads();
if(x >= borderSize && y >= borderSize
&& x + borderSize < width && y + borderSize < height)
{
int3 sum = {0, 0, 0};
for(int cy = 0; cy < meanLength; cy++)
for(int cx = 0; cx < meanLength; cx++)
{
uchar4 locValue = cache[threadIdx.y + cy][threadIdx.x + cx];
sum.x += locValue.x;
sum.y += locValue.y;
sum.z += locValue.z;
}
value.x = (unsigned char)(sum.x / denom);
value.y = (unsigned char)(sum.y / denom);
value.z = (unsigned char)(sum.z / denom);
}
if(x < width && y < height)
{
// write the value back to the global memory
pDataOut[tid] = value;
}
}
void boxBlurShared(uchar4* pDataIn, uchar4* pDataOut,
int width, int height, int blurSize, int blockDimX, int blockDimY)
{
// allocate device memory
unsigned int mem_size = sizeof(uchar4) * width * height;
uchar4* pDevDataIn;
uchar4* pDevDataOut;
hipError_t res;
res = hipMalloc((void **) &pDevDataIn, mem_size);
res = hipMalloc((void **) &pDevDataOut, mem_size);
// copy results from host to device
res = hipMemcpy(pDevDataIn, pDataIn, mem_size, hipMemcpyHostToDevice);
// define partitioning
dim3 threadsPerBlock(blockDimX, blockDimY);
dim3 numBlocks(width / threadsPerBlock.x + 1, height / threadsPerBlock.y + 1);
// run the cuda kernel
boxBlurSharedKernel<<<numBlocks, threadsPerBlock>>>(pDevDataIn, pDevDataOut,
width, height, blurSize);
// copy results from device to host
res = hipMemcpy(pDataOut, pDevDataOut, mem_size, hipMemcpyDeviceToHost);
// cleanup memory
res = hipFree(pDevDataIn);
res = hipFree(pDevDataOut);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii
.globl _Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii
.p2align 8
.type _Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii,@function
_Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s12, s[0:1], 0x18
s_load_b64 s[6:7], s[0:1], 0x10
v_bfe_u32 v4, v0, 10, 10
v_and_b32_e32 v1, 0x3ff, v0
s_mov_b32 s18, exec_lo
s_waitcnt lgkmcnt(0)
s_lshr_b32 s13, s2, 16
s_lshl_b32 s11, s12, 1
s_and_b32 s16, s2, 0xffff
s_or_b32 s10, s11, 1
s_mul_i32 s14, s14, s16
s_add_i32 s17, s10, s13
s_mul_i32 s15, s15, s13
v_cmpx_gt_u32_e64 s17, v4
s_cbranch_execz .LBB0_8
s_load_b64 s[8:9], s[0:1], 0x0
v_add_nc_u32_e32 v0, s15, v4
v_lshlrev_b32_e32 v2, 2, v1
s_add_i32 s19, s10, s16
s_sub_i32 s20, s15, s12
v_cmp_gt_u32_e32 vcc_lo, s19, v1
v_subrev_nc_u32_e32 v5, s12, v0
v_lshl_add_u32 v0, v4, 8, v2
s_lshl_b32 s21, s13, 8
s_lshl_b32 s22, s16, 2
s_sub_i32 s23, s14, s12
v_mad_u64_u32 v[2:3], null, s6, v5, v[1:2]
v_mov_b32_e32 v3, v4
s_mul_i32 s24, s6, s13
s_mov_b32 s25, 0
s_branch .LBB0_3
.LBB0_2:
s_set_inst_prefetch_distance 0x2
s_or_b32 exec_lo, exec_lo, s26
v_add_nc_u32_e32 v3, s13, v3
v_add_nc_u32_e32 v0, s21, v0
v_add_nc_u32_e32 v2, s24, v2
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_le_u32_e64 s2, s17, v3
s_or_b32 s25, s2, s25
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s25
s_cbranch_execz .LBB0_8
.LBB0_3:
s_and_saveexec_b32 s26, vcc_lo
s_cbranch_execz .LBB0_2
v_dual_mov_b32 v6, v0 :: v_dual_add_nc_u32 v7, s20, v3
v_mov_b32_e32 v5, v2
s_mov_b32 s27, 0
s_delay_alu instid0(VALU_DEP_2)
v_cmp_lt_i32_e64 s2, -1, v7
v_cmp_gt_i32_e64 s3, s7, v7
v_mov_b32_e32 v7, v1
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_6
.p2align 6
.LBB0_5:
s_or_b32 exec_lo, exec_lo, s5
v_add_nc_u32_e32 v7, s16, v7
v_add_nc_u32_e32 v6, s22, v6
v_add_nc_u32_e32 v5, s16, v5
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_le_u32_e64 s4, s19, v7
s_or_b32 s27, s4, s27
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s27
s_cbranch_execz .LBB0_2
.LBB0_6:
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v8, s23, v7
v_cmp_gt_i32_e64 s4, s6, v8
v_cmp_lt_i32_e64 s5, -1, v8
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s4, s2, s4
s_and_b32 s4, s3, s4
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
s_and_b32 s4, s5, s4
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s5, s4
s_cbranch_execz .LBB0_5
v_add_nc_u32_e32 v8, s23, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v9, 31, v8
v_lshlrev_b64 v[8:9], 2, v[8:9]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v8, s4, s8, v8
v_add_co_ci_u32_e64 v9, s4, s9, v9, s4
global_load_b32 v8, v[8:9], off
s_waitcnt vmcnt(0)
ds_store_b32 v6, v8
s_branch .LBB0_5
.LBB0_8:
s_or_b32 exec_lo, exec_lo, s18
v_add_nc_u32_e32 v0, s14, v1
v_add_nc_u32_e32 v2, s15, v4
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_add_nc_u32_e32 v5, s12, v0
v_mov_b32_e32 v7, 0x7f
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_cmp_gt_i32_e64 s2, s6, v5
v_mov_b32_e32 v5, 0x7f
v_min_i32_e32 v3, v0, v2
v_cmp_le_i32_e32 vcc_lo, s12, v3
v_dual_mov_b32 v3, 0x7f :: v_dual_add_nc_u32 v6, s12, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cmp_gt_i32_e64 s3, s7, v6
v_mov_b32_e32 v6, 0xff
s_and_b32 s2, s2, s3
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s2, vcc_lo
s_and_saveexec_b32 s4, s2
s_cbranch_execz .LBB0_16
v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v6, 0
v_mov_b32_e32 v5, 0
s_cmp_lt_i32 s12, 0
s_mov_b32 s2, 0
s_cbranch_scc1 .LBB0_15
v_dual_mov_b32 v6, 0 :: v_dual_lshlrev_b32 v1, 2, v1
v_mov_b32_e32 v3, 0
v_mov_b32_e32 v5, 0
s_add_i32 s3, s11, 1
s_delay_alu instid0(VALU_DEP_3)
v_lshl_add_u32 v1, v4, 8, v1
.p2align 6
.LBB0_11:
s_delay_alu instid0(VALU_DEP_1)
v_mov_b32_e32 v4, v1
s_mov_b32 s5, s3
.LBB0_12:
ds_load_u8 v7, v4
ds_load_u8 v8, v4 offset:1
ds_load_u8 v9, v4 offset:2
v_add_nc_u32_e32 v4, 4, v4
s_add_i32 s5, s5, -1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_eq_u32 s5, 0
s_waitcnt lgkmcnt(2)
v_add_nc_u32_e32 v6, v6, v7
s_waitcnt lgkmcnt(1)
v_add_nc_u32_e32 v5, v5, v8
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v3, v3, v9
s_cbranch_scc0 .LBB0_12
v_add_nc_u32_e32 v1, 0x100, v1
s_add_i32 s5, s2, 1
s_cmp_eq_u32 s2, s11
s_cbranch_scc1 .LBB0_15
s_mov_b32 s2, s5
s_branch .LBB0_11
.LBB0_15:
s_mul_i32 s10, s10, s10
v_ashrrev_i32_e32 v7, 31, v6
v_cvt_f32_u32_e32 v1, s10
s_sub_i32 s2, 0, s10
v_ashrrev_i32_e32 v8, 31, v5
v_ashrrev_i32_e32 v9, 31, v3
v_add_nc_u32_e32 v6, v6, v7
v_rcp_iflag_f32_e32 v1, v1
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v5, v5, v8
v_xor_b32_e32 v6, v6, v7
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v1, 0x4f7ffffe, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_u32_f32_e32 v1, v1
v_mul_lo_u32 v4, s2, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v4, v1, v4
v_add_nc_u32_e32 v1, v1, v4
v_xor_b32_e32 v4, v5, v8
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v5, v6, v1
v_mul_hi_u32 v10, v4, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_lo_u32 v11, v5, s10
v_mul_lo_u32 v12, v10, s10
v_add_nc_u32_e32 v14, 1, v5
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_sub_nc_u32_e32 v6, v6, v11
v_sub_nc_u32_e32 v4, v4, v12
v_add_nc_u32_e32 v11, 1, v10
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
v_cmp_le_u32_e32 vcc_lo, s10, v6
v_subrev_nc_u32_e32 v12, s10, v6
v_add_nc_u32_e32 v3, v3, v9
v_cmp_le_u32_e64 s2, s10, v4
v_dual_cndmask_b32 v5, v5, v14 :: v_dual_cndmask_b32 v6, v6, v12
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_xor_b32_e32 v3, v3, v9
v_cndmask_b32_e64 v10, v10, v11, s2
v_subrev_nc_u32_e32 v11, s10, v4
s_delay_alu instid0(VALU_DEP_4)
v_add_nc_u32_e32 v12, 1, v5
v_cmp_le_u32_e32 vcc_lo, s10, v6
v_mov_b32_e32 v6, 0xff
v_mul_hi_u32 v1, v3, v1
v_cndmask_b32_e64 v4, v4, v11, s2
v_add_nc_u32_e32 v11, 1, v10
v_cndmask_b32_e32 v5, v5, v12, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
v_cmp_le_u32_e32 vcc_lo, s10, v4
v_mul_lo_u32 v13, v1, s10
v_dual_cndmask_b32 v4, v10, v11 :: v_dual_add_nc_u32 v15, 1, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_xor_b32_e32 v4, v4, v8
v_sub_nc_u32_e32 v3, v3, v13
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cmp_le_u32_e64 s3, s10, v3
v_subrev_nc_u32_e32 v13, s10, v3
v_cndmask_b32_e64 v1, v1, v15, s3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cndmask_b32_e64 v3, v3, v13, s3
v_add_nc_u32_e32 v13, 1, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_cmp_le_u32_e32 vcc_lo, s10, v3
v_xor_b32_e32 v3, v5, v7
v_cndmask_b32_e32 v1, v1, v13, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_sub_nc_u32_e32 v5, v3, v7
v_sub_nc_u32_e32 v7, v4, v8
v_xor_b32_e32 v1, v1, v9
s_delay_alu instid0(VALU_DEP_1)
v_sub_nc_u32_e32 v3, v1, v9
.LBB0_16:
s_or_b32 exec_lo, exec_lo, s4
v_cmp_gt_i32_e32 vcc_lo, s6, v0
v_cmp_gt_i32_e64 s2, s7, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, vcc_lo, s2
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_18
s_load_b64 s[0:1], s[0:1], 0x8
v_mad_u64_u32 v[8:9], null, v2, s6, v[0:1]
v_and_b32_e32 v0, 0xff, v5
v_lshlrev_b16 v1, 8, v7
v_and_b32_e32 v2, 0xff, v3
v_lshlrev_b16 v3, 8, v6
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_or_b32_e32 v4, v0, v1
v_ashrrev_i32_e32 v9, 31, v8
v_or_b32_e32 v2, v2, v3
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_and_b32_e32 v3, 0xffff, v4
v_lshlrev_b64 v[0:1], 2, v[8:9]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b32_e32 v2, 16, v2
v_or_b32_e32 v2, v3, v2
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_18:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii
.amdhsa_group_segment_fixed_size 16384
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 16
.amdhsa_next_free_sgpr 28
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii, .Lfunc_end0-_Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 16384
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii
.private_segment_fixed_size: 0
.sgpr_count: 30
.sgpr_spill_count: 0
.symbol: _Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 16
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
const int CachDim = 64;
__global__ void boxBlurSharedKernel(uchar4* pDataIn, uchar4* pDataOut,
int width, int height, int borderSize)
{
// get the position for the current thread
const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
// calculate the memory adress
const int tid = y * width + x;
__shared__ uchar4 cache[CachDim][CachDim];
uchar4 value = {127, 127, 127, 255};
int meanLength = 2 * borderSize + 1;
int denom = meanLength * meanLength;
for(int cy = threadIdx.y; cy < blockDim.y + meanLength; cy += blockDim.y)
for(int cx = threadIdx.x; cx < blockDim.x + meanLength; cx += blockDim.x)
{
int tmpX = (blockIdx.x * blockDim.x) + cx - borderSize;
int tmpY = (blockIdx.y * blockDim.y) + cy - borderSize;
if(tmpX >= 0 && tmpX < width
&& tmpY >= 0 && tmpY < height)
{
cache[cy][cx] = pDataIn[tmpY * width + tmpX];
}
}
__syncthreads();
if(x >= borderSize && y >= borderSize
&& x + borderSize < width && y + borderSize < height)
{
int3 sum = {0, 0, 0};
for(int cy = 0; cy < meanLength; cy++)
for(int cx = 0; cx < meanLength; cx++)
{
uchar4 locValue = cache[threadIdx.y + cy][threadIdx.x + cx];
sum.x += locValue.x;
sum.y += locValue.y;
sum.z += locValue.z;
}
value.x = (unsigned char)(sum.x / denom);
value.y = (unsigned char)(sum.y / denom);
value.z = (unsigned char)(sum.z / denom);
}
if(x < width && y < height)
{
// write the value back to the global memory
pDataOut[tid] = value;
}
}
void boxBlurShared(uchar4* pDataIn, uchar4* pDataOut,
int width, int height, int blurSize, int blockDimX, int blockDimY)
{
// allocate device memory
unsigned int mem_size = sizeof(uchar4) * width * height;
uchar4* pDevDataIn;
uchar4* pDevDataOut;
hipError_t res;
res = hipMalloc((void **) &pDevDataIn, mem_size);
res = hipMalloc((void **) &pDevDataOut, mem_size);
// copy results from host to device
res = hipMemcpy(pDevDataIn, pDataIn, mem_size, hipMemcpyHostToDevice);
// define partitioning
dim3 threadsPerBlock(blockDimX, blockDimY);
dim3 numBlocks(width / threadsPerBlock.x + 1, height / threadsPerBlock.y + 1);
// run the cuda kernel
boxBlurSharedKernel<<<numBlocks, threadsPerBlock>>>(pDevDataIn, pDevDataOut,
width, height, blurSize);
// copy results from device to host
res = hipMemcpy(pDataOut, pDevDataOut, mem_size, hipMemcpyDeviceToHost);
// cleanup memory
res = hipFree(pDevDataIn);
res = hipFree(pDevDataOut);
} | .text
.file "BoxBlurShared.hip"
.globl _Z34__device_stub__boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii # -- Begin function _Z34__device_stub__boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii
.p2align 4, 0x90
.type _Z34__device_stub__boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii,@function
_Z34__device_stub__boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii: # @_Z34__device_stub__boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z34__device_stub__boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii, .Lfunc_end0-_Z34__device_stub__boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii
.cfi_endproc
# -- End function
.globl _Z13boxBlurSharedP15HIP_vector_typeIhLj4EES1_iiiii # -- Begin function _Z13boxBlurSharedP15HIP_vector_typeIhLj4EES1_iiiii
.p2align 4, 0x90
.type _Z13boxBlurSharedP15HIP_vector_typeIhLj4EES1_iiiii,@function
_Z13boxBlurSharedP15HIP_vector_typeIhLj4EES1_iiiii: # @_Z13boxBlurSharedP15HIP_vector_typeIhLj4EES1_iiiii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $152, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r9d, %r13d
movl %r8d, 24(%rsp) # 4-byte Spill
movl %ecx, %r15d
movl %edx, %r12d
movq %rsi, 40(%rsp) # 8-byte Spill
movq %rdi, %rbp
movl 208(%rsp), %ebx
movl %edx, %r14d
imull %ecx, %r14d
shll $2, %r14d
leaq 16(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
leaq 8(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movq %rbp, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movl %r13d, %eax
movq %rbx, %rcx
shlq $32, %rcx
orq %rax, %rcx
movl %r12d, %eax
xorl %edx, %edx
divl %r13d
# kill: def $eax killed $eax def $rax
leal 1(%rax), %edi
movl %r15d, %eax
xorl %edx, %edx
divl %ebx
# kill: def $eax killed $eax def $rax
incl %eax
shlq $32, %rax
orq %rax, %rdi
movl $1, %esi
movq %rcx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movl %r12d, 36(%rsp)
movl %r15d, 32(%rsp)
movl 24(%rsp), %eax # 4-byte Reload
movl %eax, 28(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 36(%rsp), %rax
movq %rax, 128(%rsp)
leaq 32(%rsp), %rax
movq %rax, 136(%rsp)
leaq 28(%rsp), %rax
movq %rax, 144(%rsp)
leaq 80(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 56(%rsp), %rdx
leaq 48(%rsp), %rcx
callq __hipPopCallConfiguration
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii, %edi
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq 8(%rsp), %rsi
movq 40(%rsp), %rdi # 8-byte Reload
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
addq $152, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z13boxBlurSharedP15HIP_vector_typeIhLj4EES1_iiiii, .Lfunc_end1-_Z13boxBlurSharedP15HIP_vector_typeIhLj4EES1_iiiii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii,@object # @_Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii
.section .rodata,"a",@progbits
.globl _Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii
.p2align 3, 0x0
_Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii:
.quad _Z34__device_stub__boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii
.size _Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii"
.size .L__unnamed_1, 55
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z34__device_stub__boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z19boxBlurSharedKernelP6uchar4S0_iii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e220000002200 */
/*0020*/ IMAD.MOV.U32 R10, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff0a7624 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*0040*/ BSSY B0, 0x2e0 ; /* 0x0000029000007945 */
/* 0x000fe20003800000 */
/*0050*/ S2R R7, SR_CTAID.X ; /* 0x0000000000077919 */
/* 0x000e620000002500 */
/*0060*/ IMAD.SHL.U32 R15, R10, 0x2, RZ ; /* 0x000000020a0f7824 */
/* 0x000fc600078e00ff */
/*0070*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */
/* 0x000e640000002100 */
/*0080*/ IADD3 R5, R15, 0x1, RZ ; /* 0x000000010f057810 */
/* 0x000fe40007ffe0ff */
/*0090*/ S2R R11, SR_CTAID.Y ; /* 0x00000000000b7919 */
/* 0x000ea40000002600 */
/*00a0*/ IADD3 R9, R5, c[0x0][0x4], RZ ; /* 0x0000010005097a10 */
/* 0x000fc80007ffe0ff */
/*00b0*/ ISETP.GE.U32.AND P0, PT, R2, R9, PT ; /* 0x000000090200720c */
/* 0x001fe20003f06070 */
/*00c0*/ IMAD R0, R7, c[0x0][0x0], R4 ; /* 0x0000000007007a24 */
/* 0x002fe400078e0204 */
/*00d0*/ IMAD R3, R11, c[0x0][0x4], R2 ; /* 0x000001000b037a24 */
/* 0x004fd400078e0202 */
/*00e0*/ @P0 BRA 0x2d0 ; /* 0x000001e000000947 */
/* 0x000fea0003800000 */
/*00f0*/ IMAD R8, R7, c[0x0][0x0], -R10.reuse ; /* 0x0000000007087a24 */
/* 0x100fe400078e0a0a */
/*0100*/ IMAD R10, R11, c[0x0][0x4], -R10 ; /* 0x000001000b0a7a24 */
/* 0x000fe200078e0a0a */
/*0110*/ IADD3.X R11, R15, c[0x0][0x0], RZ, PT, !PT ; /* 0x000000000f0b7a10 */
/* 0x000fe20003ffe4ff */
/*0120*/ IMAD.MOV.U32 R12, RZ, RZ, R2 ; /* 0x000000ffff0c7224 */
/* 0x000fc600078e0002 */
/*0130*/ ISETP.GE.U32.AND P0, PT, R4, R11, PT ; /* 0x0000000b0400720c */
/* 0x000fe20003f06070 */
/*0140*/ BSSY B1, 0x2a0 ; /* 0x0000015000017945 */
/* 0x000fd80003800000 */
/*0150*/ @P0 BRA 0x290 ; /* 0x0000013000000947 */
/* 0x001fea0003800000 */
/*0160*/ IMAD.IADD R14, R10, 0x1, R12 ; /* 0x000000010a0e7824 */
/* 0x000fe400078e020c */
/*0170*/ IMAD.MOV.U32 R13, RZ, RZ, R4 ; /* 0x000000ffff0d7224 */
/* 0x000fc800078e0004 */
/*0180*/ IMAD.IADD R17, R8, 0x1, R13 ; /* 0x0000000108117824 */
/* 0x001fe200078e020d */
/*0190*/ BSSY B2, 0x260 ; /* 0x000000c000027945 */
/* 0x000fe80003800000 */
/*01a0*/ LOP3.LUT R6, R17, R14, RZ, 0xfc, !PT ; /* 0x0000000e11067212 */
/* 0x000fc800078efcff */
/*01b0*/ ISETP.GE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fc80003f06270 */
/*01c0*/ ISETP.GE.OR P0, PT, R17, c[0x0][0x170], !P0 ; /* 0x00005c0011007a0c */
/* 0x000fc80004706670 */
/*01d0*/ ISETP.GE.OR P0, PT, R14, c[0x0][0x174], P0 ; /* 0x00005d000e007a0c */
/* 0x000fda0000706670 */
/*01e0*/ @P0 BRA 0x250 ; /* 0x0000006000000947 */
/* 0x000fea0003800000 */
/*01f0*/ IMAD.MOV.U32 R7, RZ, RZ, 0x4 ; /* 0x00000004ff077424 */
/* 0x000fe400078e00ff */
/*0200*/ IMAD R6, R14, c[0x0][0x170], R17 ; /* 0x00005c000e067a24 */
/* 0x000fc800078e0211 */
/*0210*/ IMAD.WIDE R6, R6, R7, c[0x0][0x160] ; /* 0x0000580006067625 */
/* 0x000fcc00078e0207 */
/*0220*/ LDG.E R6, [R6.64] ; /* 0x0000000606067981 */
/* 0x000ea2000c1e1900 */
/*0230*/ IMAD R17, R12, 0x40, R13 ; /* 0x000000400c117824 */
/* 0x000fca00078e020d */
/*0240*/ STS [R17.X4], R6 ; /* 0x0000000611007388 */
/* 0x0041e40000004800 */
/*0250*/ BSYNC B2 ; /* 0x0000000000027941 */
/* 0x000fea0003800000 */
/*0260*/ IADD3 R13, R13, c[0x0][0x0], RZ ; /* 0x000000000d0d7a10 */
/* 0x000fc80007ffe0ff */
/*0270*/ ISETP.GE.U32.AND P0, PT, R13, R11, PT ; /* 0x0000000b0d00720c */
/* 0x000fda0003f06070 */
/*0280*/ @!P0 BRA 0x180 ; /* 0xfffffef000008947 */
/* 0x000fea000383ffff */
/*0290*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*02a0*/ IADD3 R12, R12, c[0x0][0x4], RZ ; /* 0x000001000c0c7a10 */
/* 0x000fc80007ffe0ff */
/*02b0*/ ISETP.GE.U32.AND P0, PT, R12, R9, PT ; /* 0x000000090c00720c */
/* 0x000fda0003f06070 */
/*02c0*/ @!P0 BRA 0x130 ; /* 0xfffffe6000008947 */
/* 0x000fea000383ffff */
/*02d0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*02e0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*02f0*/ ISETP.GE.AND P0, PT, R0.reuse, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */
/* 0x040fe20003f06270 */
/*0300*/ IMAD.MOV.U32 R8, RZ, RZ, 0x7f ; /* 0x0000007fff087424 */
/* 0x000fe200078e00ff */
/*0310*/ IADD3 R6, R0, c[0x0][0x178], RZ ; /* 0x00005e0000067a10 */
/* 0x001fe40007ffe0ff */
/*0320*/ ISETP.LT.OR P0, PT, R3.reuse, c[0x0][0x178], !P0 ; /* 0x00005e0003007a0c */
/* 0x040fe20004701670 */
/*0330*/ BSSY B0, 0xb70 ; /* 0x0000083000007945 */
/* 0x000fe20003800000 */
/*0340*/ IADD3 R7, R3, c[0x0][0x178], RZ ; /* 0x00005e0003077a10 */
/* 0x000fe40007ffe0ff */
/*0350*/ ISETP.GE.OR P0, PT, R6, c[0x0][0x170], P0 ; /* 0x00005c0006007a0c */
/* 0x000fe20000706670 */
/*0360*/ IMAD.MOV.U32 R6, RZ, RZ, 0x7f ; /* 0x0000007fff067424 */
/* 0x000fc600078e00ff */
/*0370*/ ISETP.GE.OR P0, PT, R7, c[0x0][0x174], P0 ; /* 0x00005d0007007a0c */
/* 0x000fe20000706670 */
/*0380*/ IMAD.MOV.U32 R7, RZ, RZ, 0x7f ; /* 0x0000007fff077424 */
/* 0x000fd800078e00ff */
/*0390*/ @P0 BRA 0xb60 ; /* 0x000007c000000947 */
/* 0x000fea0003800000 */
/*03a0*/ ISETP.LE.AND P0, PT, RZ, c[0x0][0x178], PT ; /* 0x00005e00ff007a0c */
/* 0x000fe20003f03270 */
/*03b0*/ IMAD.MOV.U32 R8, RZ, RZ, RZ ; /* 0x000000ffff087224 */
/* 0x000fe200078e00ff */
/*03c0*/ CS2R R6, SRZ ; /* 0x0000000000067805 */
/* 0x000fd6000001ff00 */
/*03d0*/ @!P0 BRA 0x810 ; /* 0x0000043000008947 */
/* 0x000fea0003800000 */
/*03e0*/ IMNMX R22, RZ, R15, !PT ; /* 0x0000000fff167217 */
/* 0x000fe20007800200 */
/*03f0*/ CS2R R6, SRZ ; /* 0x0000000000067805 */
/* 0x000fe2000001ff00 */
/*0400*/ IMAD.MOV.U32 R8, RZ, RZ, RZ ; /* 0x000000ffff087224 */
/* 0x000fe200078e00ff */
/*0410*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fe20008000000 */
/*0420*/ IADD3 R10, R22, 0x1, RZ ; /* 0x00000001160a7810 */
/* 0x000fc80007ffe0ff */
/*0430*/ LOP3.LUT R9, R10, 0x3, RZ, 0xc0, !PT ; /* 0x000000030a097812 */
/* 0x000fca00078ec0ff */
/*0440*/ IMAD.IADD R11, R10, 0x1, -R9 ; /* 0x000000010a0b7824 */
/* 0x000fe400078e0a09 */
/*0450*/ ISETP.GE.U32.AND P0, PT, R22, 0x3, PT ; /* 0x000000031600780c */
/* 0x000fe20003f06070 */
/*0460*/ UMOV UR5, URZ ; /* 0x0000003f00057c82 */
/* 0x000fe20008000000 */
/*0470*/ IADD3 R12, R2, UR4, RZ ; /* 0x00000004020c7c10 */
/* 0x000fd6000fffe0ff */
/*0480*/ @!P0 BRA 0x670 ; /* 0x000001e000008947 */
/* 0x000fea0003800000 */
/*0490*/ IMAD.MOV.U32 R10, RZ, RZ, R11 ; /* 0x000000ffff0a7224 */
/* 0x000fe200078e000b */
/*04a0*/ UMOV UR5, URZ ; /* 0x0000003f00057c82 */
/* 0x000fe40008000000 */
/*04b0*/ IADD3 R13, R4, UR5, RZ ; /* 0x00000005040d7c10 */
/* 0x000fe2000fffe0ff */
/*04c0*/ UIADD3 UR5, UR5, 0x4, URZ ; /* 0x0000000405057890 */
/* 0x000fe2000fffe03f */
/*04d0*/ IADD3 R10, R10, -0x4, RZ ; /* 0xfffffffc0a0a7810 */
/* 0x000fc60007ffe0ff */
/*04e0*/ IMAD R23, R12, 0x40, R13 ; /* 0x000000400c177824 */
/* 0x000fe200078e020d */
/*04f0*/ ISETP.NE.AND P0, PT, R10, RZ, PT ; /* 0x000000ff0a00720c */
/* 0x000fc80003f05270 */
/*0500*/ LDS R16, [R23.X4] ; /* 0x0000000017107984 */
/* 0x000e280000004800 */
/*0510*/ LDS R21, [R23.X4+0x4] ; /* 0x0000040017157984 */
/* 0x000e680000004800 */
/*0520*/ LDS R14, [R23.X4+0x8] ; /* 0x00000800170e7984 */
/* 0x000ea80000004800 */
/*0530*/ LDS R13, [R23.X4+0xc] ; /* 0x00000c00170d7984 */
/* 0x000ee20000004800 */
/*0540*/ PRMT R17, R16, 0x7770, RZ ; /* 0x0000777010117816 */
/* 0x001fc400000000ff */
/*0550*/ PRMT R19, R16.reuse, 0x7771, RZ ; /* 0x0000777110137816 */
/* 0x040fe400000000ff */
/*0560*/ PRMT R18, R21.reuse, 0x7770, RZ ; /* 0x0000777015127816 */
/* 0x042fe400000000ff */
/*0570*/ PRMT R20, R21.reuse, 0x7771, RZ ; /* 0x0000777115147816 */
/* 0x040fe400000000ff */
/*0580*/ PRMT R16, R16, 0x7772, RZ ; /* 0x0000777210107816 */
/* 0x000fe400000000ff */
/*0590*/ PRMT R21, R21, 0x7772, RZ ; /* 0x0000777215157816 */
/* 0x000fe400000000ff */
/*05a0*/ IADD3 R6, R18, R6, R17 ; /* 0x0000000612067210 */
/* 0x000fc40007ffe011 */
/*05b0*/ IADD3 R8, R20, R8, R19 ; /* 0x0000000814087210 */
/* 0x000fe40007ffe013 */
/*05c0*/ IADD3 R7, R21, R7, R16 ; /* 0x0000000715077210 */
/* 0x000fe40007ffe010 */
/*05d0*/ PRMT R17, R14.reuse, 0x7770, RZ ; /* 0x000077700e117816 */
/* 0x044fe400000000ff */
/*05e0*/ PRMT R19, R14, 0x7771, RZ ; /* 0x000077710e137816 */
/* 0x000fe400000000ff */
/*05f0*/ PRMT R16, R13.reuse, 0x7770, RZ ; /* 0x000077700d107816 */
/* 0x048fe400000000ff */
/*0600*/ PRMT R18, R13, 0x7771, RZ ; /* 0x000077710d127816 */
/* 0x000fc400000000ff */
/*0610*/ PRMT R14, R14, 0x7772, RZ ; /* 0x000077720e0e7816 */
/* 0x000fe400000000ff */
/*0620*/ PRMT R13, R13, 0x7772, RZ ; /* 0x000077720d0d7816 */
/* 0x000fe400000000ff */
/*0630*/ IADD3 R6, R16, R6, R17 ; /* 0x0000000610067210 */
/* 0x000fe40007ffe011 */
/*0640*/ IADD3 R8, R18, R8, R19 ; /* 0x0000000812087210 */
/* 0x000fe40007ffe013 */
/*0650*/ IADD3 R7, R13, R7, R14 ; /* 0x000000070d077210 */
/* 0x000fe20007ffe00e */
/*0660*/ @P0 BRA 0x4b0 ; /* 0xfffffe4000000947 */
/* 0x000fea000383ffff */
/*0670*/ IADD3 R13, R4, UR5, RZ ; /* 0x00000005040d7c10 */
/* 0x000fe4000fffe0ff */
/*0680*/ ISETP.NE.AND P0, PT, R9, 0x1, PT ; /* 0x000000010900780c */
/* 0x000fc40003f05270 */
/*0690*/ ISETP.LE.AND P1, PT, R15, UR4, PT ; /* 0x000000040f007c0c */
/* 0x000fe2000bf23270 */
/*06a0*/ IMAD R13, R12, 0x40, R13 ; /* 0x000000400c0d7824 */
/* 0x000fe200078e020d */
/*06b0*/ UIADD3 UR4, UR4, 0x1, URZ ; /* 0x0000000104047890 */
/* 0x000fc6000fffe03f */
/*06c0*/ IMAD.SHL.U32 R18, R13, 0x4, RZ ; /* 0x000000040d127824 */
/* 0x000fca00078e00ff */
/*06d0*/ LDS R10, [R18] ; /* 0x00000000120a7984 */
/* 0x000e240000000800 */
/*06e0*/ PRMT R13, R10.reuse, 0x7770, RZ ; /* 0x000077700a0d7816 */
/* 0x041fe400000000ff */
/*06f0*/ PRMT R17, R10.reuse, 0x7771, RZ ; /* 0x000077710a117816 */
/* 0x040fe400000000ff */
/*0700*/ PRMT R10, R10, 0x7772, RZ ; /* 0x000077720a0a7816 */
/* 0x000fe200000000ff */
/*0710*/ IMAD.IADD R6, R13, 0x1, R6 ; /* 0x000000010d067824 */
/* 0x000fe400078e0206 */
/*0720*/ IMAD.IADD R8, R17, 0x1, R8 ; /* 0x0000000111087824 */
/* 0x000fe400078e0208 */
/*0730*/ IMAD.IADD R7, R10, 0x1, R7 ; /* 0x000000010a077824 */
/* 0x000fe200078e0207 */
/*0740*/ @!P0 BRA 0x800 ; /* 0x000000b000008947 */
/* 0x000fea0003800000 */
/*0750*/ LDS R10, [R18+0x4] ; /* 0x00000400120a7984 */
/* 0x000e280000000800 */
/*0760*/ LDS R16, [R18+0x8] ; /* 0x0000080012107984 */
/* 0x000e620000000800 */
/*0770*/ PRMT R13, R10, 0x7770, RZ ; /* 0x000077700a0d7816 */
/* 0x001fc400000000ff */
/*0780*/ PRMT R17, R10, 0x7771, RZ ; /* 0x000077710a117816 */
/* 0x000fe400000000ff */
/*0790*/ PRMT R12, R16.reuse, 0x7770, RZ ; /* 0x00007770100c7816 */
/* 0x042fe400000000ff */
/*07a0*/ PRMT R14, R16, 0x7771, RZ ; /* 0x00007771100e7816 */
/* 0x000fe400000000ff */
/*07b0*/ PRMT R10, R10, 0x7772, RZ ; /* 0x000077720a0a7816 */
/* 0x000fe400000000ff */
/*07c0*/ PRMT R16, R16, 0x7772, RZ ; /* 0x0000777210107816 */
/* 0x000fe400000000ff */
/*07d0*/ IADD3 R6, R12, R6, R13 ; /* 0x000000060c067210 */
/* 0x000fc40007ffe00d */
/*07e0*/ IADD3 R8, R14, R8, R17 ; /* 0x000000080e087210 */
/* 0x000fe40007ffe011 */
/*07f0*/ IADD3 R7, R16, R7, R10 ; /* 0x0000000710077210 */
/* 0x000fe40007ffe00a */
/*0800*/ @!P1 BRA 0x450 ; /* 0xfffffc4000009947 */
/* 0x000fea000383ffff */
/*0810*/ IMAD R5, R5, R5, RZ ; /* 0x0000000505057224 */
/* 0x000fe200078e02ff */
/*0820*/ IABS R12, R6 ; /* 0x00000006000c7213 */
/* 0x000fe40000000000 */
/*0830*/ IABS R14, R8 ; /* 0x00000008000e7213 */
/* 0x000fe40000000000 */
/*0840*/ IABS R2, R5.reuse ; /* 0x0000000500027213 */
/* 0x080fe40000000000 */
/*0850*/ IABS R13, R5 ; /* 0x00000005000d7213 */
/* 0x000fe40000000000 */
/*0860*/ I2F.RP R4, R2 ; /* 0x0000000200047306 */
/* 0x000e220000209400 */
/*0870*/ IABS R16, R7 ; /* 0x0000000700107213 */
/* 0x000fc40000000000 */
/*0880*/ IMAD.MOV R13, RZ, RZ, -R13 ; /* 0x000000ffff0d7224 */
/* 0x000fe200078e0a0d */
/*0890*/ LOP3.LUT R6, R6, R5.reuse, RZ, 0x3c, !PT ; /* 0x0000000506067212 */
/* 0x080fe400078e3cff */
/*08a0*/ LOP3.LUT R8, R8, R5.reuse, RZ, 0x3c, !PT ; /* 0x0000000508087212 */
/* 0x080fe400078e3cff */
/*08b0*/ LOP3.LUT R7, R7, R5, RZ, 0x3c, !PT ; /* 0x0000000507077212 */
/* 0x000fe400078e3cff */
/*08c0*/ ISETP.GE.AND P4, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe20003f86270 */
/*08d0*/ MUFU.RCP R4, R4 ; /* 0x0000000400047308 */
/* 0x001e240000001000 */
/*08e0*/ IADD3 R10, R4, 0xffffffe, RZ ; /* 0x0ffffffe040a7810 */
/* 0x001fcc0007ffe0ff */
/*08f0*/ F2I.FTZ.U32.TRUNC.NTZ R11, R10 ; /* 0x0000000a000b7305 */
/* 0x000064000021f000 */
/*0900*/ IMAD.MOV.U32 R10, RZ, RZ, RZ ; /* 0x000000ffff0a7224 */
/* 0x001fe400078e00ff */
/*0910*/ IMAD.MOV R9, RZ, RZ, -R11 ; /* 0x000000ffff097224 */
/* 0x002fc800078e0a0b */
/*0920*/ IMAD R9, R9, R2, RZ ; /* 0x0000000209097224 */
/* 0x000fc800078e02ff */
/*0930*/ IMAD.HI.U32 R11, R11, R9, R10 ; /* 0x000000090b0b7227 */
/* 0x000fc800078e000a */
/*0940*/ IMAD.MOV.U32 R9, RZ, RZ, R12 ; /* 0x000000ffff097224 */
/* 0x000fe400078e000c */
/*0950*/ IMAD.HI.U32 R10, R11, R14, RZ ; /* 0x0000000e0b0a7227 */
/* 0x000fc800078e00ff */
/*0960*/ IMAD.HI.U32 R4, R11, R9, RZ ; /* 0x000000090b047227 */
/* 0x000fc800078e00ff */
/*0970*/ IMAD.HI.U32 R12, R11, R16, RZ ; /* 0x000000100b0c7227 */
/* 0x000fc800078e00ff */
/*0980*/ IMAD R9, R4, R13.reuse, R9 ; /* 0x0000000d04097224 */
/* 0x080fe400078e0209 */
/*0990*/ IMAD R11, R10, R13.reuse, R14 ; /* 0x0000000d0a0b7224 */
/* 0x080fe400078e020e */
/*09a0*/ IMAD R13, R12, R13, R16 ; /* 0x0000000d0c0d7224 */
/* 0x000fe200078e0210 */
/*09b0*/ ISETP.GT.U32.AND P5, PT, R2.reuse, R9, PT ; /* 0x000000090200720c */
/* 0x040fe40003fa4070 */
/*09c0*/ ISETP.GT.U32.AND P6, PT, R2.reuse, R11, PT ; /* 0x0000000b0200720c */
/* 0x040fe40003fc4070 */
/*09d0*/ ISETP.GT.U32.AND P0, PT, R2, R13, PT ; /* 0x0000000d0200720c */
/* 0x000fd20003f04070 */
/*09e0*/ @!P5 IMAD.IADD R9, R9, 0x1, -R2.reuse ; /* 0x000000010909d824 */
/* 0x100fe200078e0a02 */
/*09f0*/ @!P5 IADD3 R4, R4, 0x1, RZ ; /* 0x000000010404d810 */
/* 0x000fe20007ffe0ff */
/*0a00*/ @!P6 IMAD.IADD R11, R11, 0x1, -R2.reuse ; /* 0x000000010b0be824 */
/* 0x100fe200078e0a02 */
/*0a10*/ ISETP.GE.AND P5, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe20003fa6270 */
/*0a20*/ @!P0 IMAD.IADD R13, R13, 0x1, -R2 ; /* 0x000000010d0d8824 */
/* 0x000fe200078e0a02 */
/*0a30*/ ISETP.GE.U32.AND P1, PT, R9, R2.reuse, PT ; /* 0x000000020900720c */
/* 0x080fe40003f26070 */
/*0a40*/ ISETP.GE.U32.AND P2, PT, R11, R2.reuse, PT ; /* 0x000000020b00720c */
/* 0x080fe40003f46070 */
/*0a50*/ ISETP.GE.U32.AND P3, PT, R13, R2, PT ; /* 0x000000020d00720c */
/* 0x000fe40003f66070 */
/*0a60*/ @!P6 IADD3 R10, R10, 0x1, RZ ; /* 0x000000010a0ae810 */
/* 0x000fc40007ffe0ff */
/*0a70*/ ISETP.GE.AND P6, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fe40003fc6270 */
/*0a80*/ @!P0 IADD3 R12, R12, 0x1, RZ ; /* 0x000000010c0c8810 */
/* 0x000fe40007ffe0ff */
/*0a90*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe40003f05270 */
/*0aa0*/ @P1 IADD3 R4, R4, 0x1, RZ ; /* 0x0000000104041810 */
/* 0x000fe40007ffe0ff */
/*0ab0*/ @P2 IADD3 R10, R10, 0x1, RZ ; /* 0x000000010a0a2810 */
/* 0x000fe40007ffe0ff */
/*0ac0*/ @P3 IADD3 R12, R12, 0x1, RZ ; /* 0x000000010c0c3810 */
/* 0x000fe20007ffe0ff */
/*0ad0*/ @!P4 IMAD.MOV R4, RZ, RZ, -R4 ; /* 0x000000ffff04c224 */
/* 0x000fe200078e0a04 */
/*0ae0*/ LOP3.LUT R5, RZ, R5, RZ, 0x33, !PT ; /* 0x00000005ff057212 */
/* 0x000fe200078e33ff */
/*0af0*/ @!P5 IMAD.MOV R10, RZ, RZ, -R10 ; /* 0x000000ffff0ad224 */
/* 0x000fc400078e0a0a */
/*0b00*/ @!P6 IMAD.MOV R12, RZ, RZ, -R12 ; /* 0x000000ffff0ce224 */
/* 0x000fe200078e0a0c */
/*0b10*/ SEL R4, R5.reuse, R4, !P0 ; /* 0x0000000405047207 */
/* 0x040fe40004000000 */
/*0b20*/ SEL R7, R5.reuse, R10, !P0 ; /* 0x0000000a05077207 */
/* 0x040fe40004000000 */
/*0b30*/ SEL R5, R5, R12, !P0 ; /* 0x0000000c05057207 */
/* 0x000fe40004000000 */
/*0b40*/ PRMT R6, R4, 0x7610, R6 ; /* 0x0000761004067816 */
/* 0x000fe40000000006 */
/*0b50*/ PRMT R8, R5, 0x7610, R8 ; /* 0x0000761005087816 */
/* 0x000fe40000000008 */
/*0b60*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0b70*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x174], PT ; /* 0x00005d0003007a0c */
/* 0x000fc80003f06270 */
/*0b80*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x170], P0 ; /* 0x00005c0000007a0c */
/* 0x000fda0000706670 */
/*0b90*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0ba0*/ PRMT R7, R7, 0x7604, R6 ; /* 0x0000760407077816 */
/* 0x000fe20000000006 */
/*0bb0*/ IMAD R3, R3, c[0x0][0x170], R0 ; /* 0x00005c0003037a24 */
/* 0x000fe400078e0200 */
/*0bc0*/ IMAD.MOV.U32 R0, RZ, RZ, 0xff ; /* 0x000000ffff007424 */
/* 0x000fe200078e00ff */
/*0bd0*/ PRMT R7, R8, 0x7054, R7 ; /* 0x0000705408077816 */
/* 0x000fe20000000007 */
/*0be0*/ IMAD.MOV.U32 R2, RZ, RZ, 0x4 ; /* 0x00000004ff027424 */
/* 0x000fc600078e00ff */
/*0bf0*/ PRMT R7, R0, 0x654, R7 ; /* 0x0000065400077816 */
/* 0x000fe20000000007 */
/*0c00*/ IMAD.WIDE R2, R3, R2, c[0x0][0x168] ; /* 0x00005a0003027625 */
/* 0x000fca00078e0202 */
/*0c10*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x000fe2000c101906 */
/*0c20*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0c30*/ BRA 0xc30; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0c40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ca0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ce0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cf0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii
.globl _Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii
.p2align 8
.type _Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii,@function
_Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s12, s[0:1], 0x18
s_load_b64 s[6:7], s[0:1], 0x10
v_bfe_u32 v4, v0, 10, 10
v_and_b32_e32 v1, 0x3ff, v0
s_mov_b32 s18, exec_lo
s_waitcnt lgkmcnt(0)
s_lshr_b32 s13, s2, 16
s_lshl_b32 s11, s12, 1
s_and_b32 s16, s2, 0xffff
s_or_b32 s10, s11, 1
s_mul_i32 s14, s14, s16
s_add_i32 s17, s10, s13
s_mul_i32 s15, s15, s13
v_cmpx_gt_u32_e64 s17, v4
s_cbranch_execz .LBB0_8
s_load_b64 s[8:9], s[0:1], 0x0
v_add_nc_u32_e32 v0, s15, v4
v_lshlrev_b32_e32 v2, 2, v1
s_add_i32 s19, s10, s16
s_sub_i32 s20, s15, s12
v_cmp_gt_u32_e32 vcc_lo, s19, v1
v_subrev_nc_u32_e32 v5, s12, v0
v_lshl_add_u32 v0, v4, 8, v2
s_lshl_b32 s21, s13, 8
s_lshl_b32 s22, s16, 2
s_sub_i32 s23, s14, s12
v_mad_u64_u32 v[2:3], null, s6, v5, v[1:2]
v_mov_b32_e32 v3, v4
s_mul_i32 s24, s6, s13
s_mov_b32 s25, 0
s_branch .LBB0_3
.LBB0_2:
s_set_inst_prefetch_distance 0x2
s_or_b32 exec_lo, exec_lo, s26
v_add_nc_u32_e32 v3, s13, v3
v_add_nc_u32_e32 v0, s21, v0
v_add_nc_u32_e32 v2, s24, v2
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_le_u32_e64 s2, s17, v3
s_or_b32 s25, s2, s25
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s25
s_cbranch_execz .LBB0_8
.LBB0_3:
s_and_saveexec_b32 s26, vcc_lo
s_cbranch_execz .LBB0_2
v_dual_mov_b32 v6, v0 :: v_dual_add_nc_u32 v7, s20, v3
v_mov_b32_e32 v5, v2
s_mov_b32 s27, 0
s_delay_alu instid0(VALU_DEP_2)
v_cmp_lt_i32_e64 s2, -1, v7
v_cmp_gt_i32_e64 s3, s7, v7
v_mov_b32_e32 v7, v1
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_6
.p2align 6
.LBB0_5:
s_or_b32 exec_lo, exec_lo, s5
v_add_nc_u32_e32 v7, s16, v7
v_add_nc_u32_e32 v6, s22, v6
v_add_nc_u32_e32 v5, s16, v5
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_le_u32_e64 s4, s19, v7
s_or_b32 s27, s4, s27
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s27
s_cbranch_execz .LBB0_2
.LBB0_6:
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v8, s23, v7
v_cmp_gt_i32_e64 s4, s6, v8
v_cmp_lt_i32_e64 s5, -1, v8
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s4, s2, s4
s_and_b32 s4, s3, s4
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
s_and_b32 s4, s5, s4
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s5, s4
s_cbranch_execz .LBB0_5
v_add_nc_u32_e32 v8, s23, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v9, 31, v8
v_lshlrev_b64 v[8:9], 2, v[8:9]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v8, s4, s8, v8
v_add_co_ci_u32_e64 v9, s4, s9, v9, s4
global_load_b32 v8, v[8:9], off
s_waitcnt vmcnt(0)
ds_store_b32 v6, v8
s_branch .LBB0_5
.LBB0_8:
s_or_b32 exec_lo, exec_lo, s18
v_add_nc_u32_e32 v0, s14, v1
v_add_nc_u32_e32 v2, s15, v4
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_add_nc_u32_e32 v5, s12, v0
v_mov_b32_e32 v7, 0x7f
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_cmp_gt_i32_e64 s2, s6, v5
v_mov_b32_e32 v5, 0x7f
v_min_i32_e32 v3, v0, v2
v_cmp_le_i32_e32 vcc_lo, s12, v3
v_dual_mov_b32 v3, 0x7f :: v_dual_add_nc_u32 v6, s12, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cmp_gt_i32_e64 s3, s7, v6
v_mov_b32_e32 v6, 0xff
s_and_b32 s2, s2, s3
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s2, vcc_lo
s_and_saveexec_b32 s4, s2
s_cbranch_execz .LBB0_16
v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v6, 0
v_mov_b32_e32 v5, 0
s_cmp_lt_i32 s12, 0
s_mov_b32 s2, 0
s_cbranch_scc1 .LBB0_15
v_dual_mov_b32 v6, 0 :: v_dual_lshlrev_b32 v1, 2, v1
v_mov_b32_e32 v3, 0
v_mov_b32_e32 v5, 0
s_add_i32 s3, s11, 1
s_delay_alu instid0(VALU_DEP_3)
v_lshl_add_u32 v1, v4, 8, v1
.p2align 6
.LBB0_11:
s_delay_alu instid0(VALU_DEP_1)
v_mov_b32_e32 v4, v1
s_mov_b32 s5, s3
.LBB0_12:
ds_load_u8 v7, v4
ds_load_u8 v8, v4 offset:1
ds_load_u8 v9, v4 offset:2
v_add_nc_u32_e32 v4, 4, v4
s_add_i32 s5, s5, -1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_eq_u32 s5, 0
s_waitcnt lgkmcnt(2)
v_add_nc_u32_e32 v6, v6, v7
s_waitcnt lgkmcnt(1)
v_add_nc_u32_e32 v5, v5, v8
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v3, v3, v9
s_cbranch_scc0 .LBB0_12
v_add_nc_u32_e32 v1, 0x100, v1
s_add_i32 s5, s2, 1
s_cmp_eq_u32 s2, s11
s_cbranch_scc1 .LBB0_15
s_mov_b32 s2, s5
s_branch .LBB0_11
.LBB0_15:
s_mul_i32 s10, s10, s10
v_ashrrev_i32_e32 v7, 31, v6
v_cvt_f32_u32_e32 v1, s10
s_sub_i32 s2, 0, s10
v_ashrrev_i32_e32 v8, 31, v5
v_ashrrev_i32_e32 v9, 31, v3
v_add_nc_u32_e32 v6, v6, v7
v_rcp_iflag_f32_e32 v1, v1
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v5, v5, v8
v_xor_b32_e32 v6, v6, v7
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v1, 0x4f7ffffe, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_u32_f32_e32 v1, v1
v_mul_lo_u32 v4, s2, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v4, v1, v4
v_add_nc_u32_e32 v1, v1, v4
v_xor_b32_e32 v4, v5, v8
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v5, v6, v1
v_mul_hi_u32 v10, v4, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_lo_u32 v11, v5, s10
v_mul_lo_u32 v12, v10, s10
v_add_nc_u32_e32 v14, 1, v5
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_sub_nc_u32_e32 v6, v6, v11
v_sub_nc_u32_e32 v4, v4, v12
v_add_nc_u32_e32 v11, 1, v10
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
v_cmp_le_u32_e32 vcc_lo, s10, v6
v_subrev_nc_u32_e32 v12, s10, v6
v_add_nc_u32_e32 v3, v3, v9
v_cmp_le_u32_e64 s2, s10, v4
v_dual_cndmask_b32 v5, v5, v14 :: v_dual_cndmask_b32 v6, v6, v12
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_xor_b32_e32 v3, v3, v9
v_cndmask_b32_e64 v10, v10, v11, s2
v_subrev_nc_u32_e32 v11, s10, v4
s_delay_alu instid0(VALU_DEP_4)
v_add_nc_u32_e32 v12, 1, v5
v_cmp_le_u32_e32 vcc_lo, s10, v6
v_mov_b32_e32 v6, 0xff
v_mul_hi_u32 v1, v3, v1
v_cndmask_b32_e64 v4, v4, v11, s2
v_add_nc_u32_e32 v11, 1, v10
v_cndmask_b32_e32 v5, v5, v12, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
v_cmp_le_u32_e32 vcc_lo, s10, v4
v_mul_lo_u32 v13, v1, s10
v_dual_cndmask_b32 v4, v10, v11 :: v_dual_add_nc_u32 v15, 1, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_xor_b32_e32 v4, v4, v8
v_sub_nc_u32_e32 v3, v3, v13
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cmp_le_u32_e64 s3, s10, v3
v_subrev_nc_u32_e32 v13, s10, v3
v_cndmask_b32_e64 v1, v1, v15, s3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cndmask_b32_e64 v3, v3, v13, s3
v_add_nc_u32_e32 v13, 1, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_cmp_le_u32_e32 vcc_lo, s10, v3
v_xor_b32_e32 v3, v5, v7
v_cndmask_b32_e32 v1, v1, v13, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_sub_nc_u32_e32 v5, v3, v7
v_sub_nc_u32_e32 v7, v4, v8
v_xor_b32_e32 v1, v1, v9
s_delay_alu instid0(VALU_DEP_1)
v_sub_nc_u32_e32 v3, v1, v9
.LBB0_16:
s_or_b32 exec_lo, exec_lo, s4
v_cmp_gt_i32_e32 vcc_lo, s6, v0
v_cmp_gt_i32_e64 s2, s7, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, vcc_lo, s2
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_18
s_load_b64 s[0:1], s[0:1], 0x8
v_mad_u64_u32 v[8:9], null, v2, s6, v[0:1]
v_and_b32_e32 v0, 0xff, v5
v_lshlrev_b16 v1, 8, v7
v_and_b32_e32 v2, 0xff, v3
v_lshlrev_b16 v3, 8, v6
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_or_b32_e32 v4, v0, v1
v_ashrrev_i32_e32 v9, 31, v8
v_or_b32_e32 v2, v2, v3
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_and_b32_e32 v3, 0xffff, v4
v_lshlrev_b64 v[0:1], 2, v[8:9]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b32_e32 v2, 16, v2
v_or_b32_e32 v2, v3, v2
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_18:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii
.amdhsa_group_segment_fixed_size 16384
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 16
.amdhsa_next_free_sgpr 28
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii, .Lfunc_end0-_Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 16384
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii
.private_segment_fixed_size: 0
.sgpr_count: 30
.sgpr_spill_count: 0
.symbol: _Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 16
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000f42e4_00000000-6_BoxBlurShared.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z51__device_stub__Z19boxBlurSharedKernelP6uchar4S0_iiiP6uchar4S0_iii
.type _Z51__device_stub__Z19boxBlurSharedKernelP6uchar4S0_iiiP6uchar4S0_iii, @function
_Z51__device_stub__Z19boxBlurSharedKernelP6uchar4S0_iiiP6uchar4S0_iii:
.LFB2052:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z19boxBlurSharedKernelP6uchar4S0_iii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z51__device_stub__Z19boxBlurSharedKernelP6uchar4S0_iiiP6uchar4S0_iii, .-_Z51__device_stub__Z19boxBlurSharedKernelP6uchar4S0_iiiP6uchar4S0_iii
.globl _Z19boxBlurSharedKernelP6uchar4S0_iii
.type _Z19boxBlurSharedKernelP6uchar4S0_iii, @function
_Z19boxBlurSharedKernelP6uchar4S0_iii:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z51__device_stub__Z19boxBlurSharedKernelP6uchar4S0_iiiP6uchar4S0_iii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z19boxBlurSharedKernelP6uchar4S0_iii, .-_Z19boxBlurSharedKernelP6uchar4S0_iii
.globl _Z13boxBlurSharedP6uchar4S0_iiiii
.type _Z13boxBlurSharedP6uchar4S0_iiiii, @function
_Z13boxBlurSharedP6uchar4S0_iiiii:
.LFB2027:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movq %rdi, %r15
movq %rsi, (%rsp)
movl %edx, %ebx
movl %ecx, %ebp
movl %r8d, 12(%rsp)
movl %r9d, %r12d
movl 128(%rsp), %r14d
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl %edx, %eax
imull %ecx, %eax
leal 0(,%rax,4), %r13d
leaq 16(%rsp), %rdi
movq %r13, %rsi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movq %r13, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %r13, %rdx
movq %r15, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl %ebx, %eax
movl $0, %edx
divl %r12d
addl $1, %eax
movl %eax, 44(%rsp)
movl %ebp, %eax
movl $0, %edx
divl %r14d
addl $1, %eax
movl %eax, 48(%rsp)
movl %r12d, 32(%rsp)
movl %r14d, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 32(%rsp), %rdx
movl $1, %ecx
movq 44(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
movl $2, %ecx
movq %r13, %rdx
movq 24(%rsp), %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
movl 12(%rsp), %r8d
movl %ebp, %ecx
movl %ebx, %edx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z51__device_stub__Z19boxBlurSharedKernelP6uchar4S0_iiiP6uchar4S0_iii
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2027:
.size _Z13boxBlurSharedP6uchar4S0_iiiii, .-_Z13boxBlurSharedP6uchar4S0_iiiii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z19boxBlurSharedKernelP6uchar4S0_iii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z19boxBlurSharedKernelP6uchar4S0_iii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "BoxBlurShared.hip"
.globl _Z34__device_stub__boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii # -- Begin function _Z34__device_stub__boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii
.p2align 4, 0x90
.type _Z34__device_stub__boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii,@function
_Z34__device_stub__boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii: # @_Z34__device_stub__boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z34__device_stub__boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii, .Lfunc_end0-_Z34__device_stub__boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii
.cfi_endproc
# -- End function
.globl _Z13boxBlurSharedP15HIP_vector_typeIhLj4EES1_iiiii # -- Begin function _Z13boxBlurSharedP15HIP_vector_typeIhLj4EES1_iiiii
.p2align 4, 0x90
.type _Z13boxBlurSharedP15HIP_vector_typeIhLj4EES1_iiiii,@function
_Z13boxBlurSharedP15HIP_vector_typeIhLj4EES1_iiiii: # @_Z13boxBlurSharedP15HIP_vector_typeIhLj4EES1_iiiii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $152, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r9d, %r13d
movl %r8d, 24(%rsp) # 4-byte Spill
movl %ecx, %r15d
movl %edx, %r12d
movq %rsi, 40(%rsp) # 8-byte Spill
movq %rdi, %rbp
movl 208(%rsp), %ebx
movl %edx, %r14d
imull %ecx, %r14d
shll $2, %r14d
leaq 16(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
leaq 8(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movq %rbp, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movl %r13d, %eax
movq %rbx, %rcx
shlq $32, %rcx
orq %rax, %rcx
movl %r12d, %eax
xorl %edx, %edx
divl %r13d
# kill: def $eax killed $eax def $rax
leal 1(%rax), %edi
movl %r15d, %eax
xorl %edx, %edx
divl %ebx
# kill: def $eax killed $eax def $rax
incl %eax
shlq $32, %rax
orq %rax, %rdi
movl $1, %esi
movq %rcx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movl %r12d, 36(%rsp)
movl %r15d, 32(%rsp)
movl 24(%rsp), %eax # 4-byte Reload
movl %eax, 28(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 36(%rsp), %rax
movq %rax, 128(%rsp)
leaq 32(%rsp), %rax
movq %rax, 136(%rsp)
leaq 28(%rsp), %rax
movq %rax, 144(%rsp)
leaq 80(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 56(%rsp), %rdx
leaq 48(%rsp), %rcx
callq __hipPopCallConfiguration
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii, %edi
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq 8(%rsp), %rsi
movq 40(%rsp), %rdi # 8-byte Reload
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
addq $152, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z13boxBlurSharedP15HIP_vector_typeIhLj4EES1_iiiii, .Lfunc_end1-_Z13boxBlurSharedP15HIP_vector_typeIhLj4EES1_iiiii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii,@object # @_Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii
.section .rodata,"a",@progbits
.globl _Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii
.p2align 3, 0x0
_Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii:
.quad _Z34__device_stub__boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii
.size _Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii"
.size .L__unnamed_1, 55
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z34__device_stub__boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z19boxBlurSharedKernelP15HIP_vector_typeIhLj4EES1_iii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include<bits/stdc++.h>
#include<device_launch_parameters.h>
#include<cuda_runtime.h>
using namespace std;
#define SIZE 256
#define SSIZE SIZE*4 // sizeof(int)
// One partial sum per thread block: block b reduces the blockDim.x input
// elements starting at b*blockDim.x and stores their total in v_r[b].
// Preconditions: blockDim.x is a power of two, blockDim.x <= SSIZE, and the
// grid exactly tiles the input (no bounds guard on the global load).
__global__ void sum_reduction(int *v,int *v_r){
    __shared__ int partial_sum[SSIZE];

    const int gid = blockDim.x * blockIdx.x + threadIdx.x;

    // Stage one element per thread, then wait for the whole block.
    partial_sum[threadIdx.x] = v[gid];
    __syncthreads();

    // Pairwise tree reduction: the active lower half adds in the upper half,
    // and the stride shrinks until a single value remains in slot 0.
    for(int stride = blockDim.x >> 1; stride != 0; stride >>= 1){
        if(threadIdx.x < stride)
            partial_sum[threadIdx.x] += partial_sum[threadIdx.x + stride];
        __syncthreads();   // barrier kept outside the divergent branch
    }

    // Thread 0 publishes the block's result.
    if(threadIdx.x == 0)
        v_r[blockIdx.x] = partial_sum[0];
}
// One partial maximum per thread block: block b reduces the blockDim.x input
// elements starting at b*blockDim.x and stores the largest in v_r[b].
// Preconditions: blockDim.x is a power of two, blockDim.x <= SSIZE, and the
// grid exactly tiles the input (no bounds guard on the global load).
__global__ void max_reduction(int *v,int *v_r){
    __shared__ int partial_sum[SSIZE];

    const int gid = blockDim.x * blockIdx.x + threadIdx.x;

    // Stage one element per thread before the block-wide reduction.
    partial_sum[threadIdx.x] = v[gid];
    __syncthreads();

    // Tree reduction: each surviving thread keeps the larger of its own
    // slot and the slot one stride away.
    for(int stride = blockDim.x >> 1; stride != 0; stride >>= 1){
        if(threadIdx.x < stride){
            const int rival = partial_sum[threadIdx.x + stride];
            if(rival > partial_sum[threadIdx.x])
                partial_sum[threadIdx.x] = rival;   // same result as max()
        }
        __syncthreads();   // barrier kept outside the divergent branch
    }

    // Thread 0 publishes the block's maximum.
    if(threadIdx.x == 0)
        v_r[blockIdx.x] = partial_sum[0];
}
// Per-block partial sum of squared deviations: block b computes
// sum over its slice of (int)((v[i] - *mean)^2) and writes it to v_r[b].
// *mean is a single float read from global memory by every thread.
// Preconditions: blockDim.x is a power of two, blockDim.x <= SSIZE, and the
// grid exactly tiles the input (no bounds guard on the global load).
__global__ void variance(int *v,int *v_r,float *mean){
    __shared__ int partial_sum[SSIZE];
    int tid = blockIdx.x * blockDim.x + threadIdx.x;

    // Compute (x - mean)^2 in registers and stage only the result in shared
    // memory. The original staged the raw value, hit a __syncthreads(), and
    // re-read it — but each thread only ever touches its own slot there, so
    // that extra shared round-trip and barrier were redundant.
    float diff = v[tid] - *mean;            // int - float promotes to float
    partial_sum[threadIdx.x] = diff * diff; // truncated to int, as before

    __syncthreads();
    // Standard tree reduction over the staged squared deviations.
    for(int s = blockDim.x/2;s>0;s=s/2){
        if(threadIdx.x < s){
            partial_sum[threadIdx.x] += partial_sum[threadIdx.x+s];
        }
        __syncthreads();   // barrier outside the divergent if
    }
    if(threadIdx.x ==0){
        v_r[blockIdx.x] = partial_sum[0];   // publish the block's result
    }
}
// One partial minimum per thread block: block b reduces the blockDim.x input
// elements starting at b*blockDim.x and stores the smallest in v_r[b].
// Preconditions: blockDim.x is a power of two, blockDim.x <= SSIZE, and the
// grid exactly tiles the input (no bounds guard on the global load).
__global__ void min_reduction(int *v,int *v_r){
    __shared__ int partial_sum[SSIZE];

    const int gid = blockDim.x * blockIdx.x + threadIdx.x;

    // Stage one element per thread before the block-wide reduction.
    partial_sum[threadIdx.x] = v[gid];
    __syncthreads();

    // Tree reduction: each surviving thread keeps the smaller of its own
    // slot and the slot one stride away.
    for(int stride = blockDim.x >> 1; stride != 0; stride >>= 1){
        if(threadIdx.x < stride){
            const int rival = partial_sum[threadIdx.x + stride];
            if(rival < partial_sum[threadIdx.x])
                partial_sum[threadIdx.x] = rival;   // same result as min()
        }
        __syncthreads();   // barrier kept outside the divergent branch
    }

    // Thread 0 publishes the block's minimum.
    if(threadIdx.x == 0)
        v_r[blockIdx.x] = partial_sum[0];
}
// Fill v[0..n) with pseudo-random values in [0, 1000) drawn from rand().
// Uses the global C PRNG state, so output depends on any prior srand() call.
void inititialise(int* v,int n){
    int i = 0;
    while(i < n){
        v[i] = rand() % 1000;
        ++i;
    }
}
// Driver: computes the (population) variance numerator of n random ints on
// the CPU and on the GPU (per-block partials + one combining pass), then
// reports both results and the elapsed-time speedup.
int main(){
    const int n = SIZE*SIZE;
    const int thread_block_size = SIZE;
    const int num_blocks = n / thread_block_size;
    float elapsed_cpu, elapsed_gpu;
    clock_t t1, t2;

    int *h_v,*d_v,*h_v_r,*d_v_r;
    float *d_mean;
    h_v = (int*)malloc(n*sizeof(int));
    cudaMalloc(&d_v,n*sizeof(int));
    h_v_r = (int*)malloc(num_blocks*sizeof(int));
    cudaMalloc(&d_v_r,num_blocks*sizeof(int));
    cudaMalloc((void**)&d_mean,sizeof(float));

    inititialise(h_v,n);

    // Host-side mean of the input.
    int total = 0;
    for(int i = 0;i<n;i++){
        total = total + h_v[i];
    }
    // BUG FIX: the original wrote `minimum / n` — integer division — before
    // the float assignment, truncating the mean. Cast first for a true mean.
    float mean = (float)total / n;

    // CPU reference: sum of squared deviations.
    // NOTE(review): with n = 65536 and values < 1000 this can overflow a
    // 32-bit int; kept as int so it stays comparable with the GPU kernel,
    // which also accumulates in int shared memory.
    int var = 0;
    t1 = clock();
    for(int i =0;i<n;i++){
        var = var + (h_v[i]-mean)*(h_v[i]-mean);
    }
    t2 = clock();   // stop the CPU timer before printing, so I/O is not timed
    cout<<var<<endl;
    elapsed_cpu = ((float)t2 - (float)t1) / CLOCKS_PER_SEC * 1000; // cpu elapsed time in ms

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    cudaMemcpy(d_v,h_v,n*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(d_mean,&mean,sizeof(float),cudaMemcpyHostToDevice);
    // Per-block partial variances, then one single-block pass to combine the
    // num_blocks partials (valid here because num_blocks == thread_block_size).
    variance<<<num_blocks,thread_block_size>>>(d_v,d_v_r,d_mean);
    sum_reduction<<<1,thread_block_size>>>(d_v_r,d_v_r);
    // Copy exactly what was allocated: num_blocks results. The original used
    // thread_block_size, which is only coincidentally the same number.
    cudaMemcpy(h_v_r,d_v_r,num_blocks*sizeof(int),cudaMemcpyDeviceToHost);
    cout<<h_v_r[0]<<endl;
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed_gpu, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    cout<<elapsed_cpu<<endl;
    cout<<elapsed_gpu<<endl;
    cout<<"speedup"<<elapsed_cpu/elapsed_gpu<<endl;

    // Release device and host buffers (the original leaked all five).
    cudaFree(d_v);
    cudaFree(d_v_r);
    cudaFree(d_mean);
    free(h_v);
    free(h_v_r);
    return 0;
}
Function : _Z13min_reductionPiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R2, R6, c[0x0][0x0], R7 ; /* 0x0000000006027a24 */
/* 0x001fc800078e0207 */
/*0060*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fcc00078e0203 */
/*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000602027981 */
/* 0x000ea2000c1e1900 */
/*0080*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe20000000800 */
/*0090*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fe20003f05270 */
/*00a0*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fcc0008011604 */
/*00b0*/ ISETP.NE.AND P1, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fe2000bf25270 */
/*00c0*/ STS [R7.X4], R2 ; /* 0x0000000207007388 */
/* 0x0041e80000004800 */
/*00d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000ff00000010000 */
/*00e0*/ @!P1 BRA 0x1b0 ; /* 0x000000c000009947 */
/* 0x000fea0003800000 */
/*00f0*/ SHF.L.U32 R0, R7, 0x2, RZ ; /* 0x0000000207007819 */
/* 0x001fe200000006ff */
/*0100*/ IMAD.U32 R3, RZ, RZ, UR4 ; /* 0x00000004ff037e24 */
/* 0x000fca000f8e00ff */
/*0110*/ ISETP.GE.U32.AND P1, PT, R7, R3, PT ; /* 0x000000030700720c */
/* 0x000fda0003f26070 */
/*0120*/ @!P1 LEA R4, R3, R0, 0x2 ; /* 0x0000000003049211 */
/* 0x000fe200078e10ff */
/*0130*/ @!P1 LDS R2, [R7.X4] ; /* 0x0000000007029984 */
/* 0x000fe20000004800 */
/*0140*/ SHF.R.U32.HI R3, RZ, 0x1, R3 ; /* 0x00000001ff037819 */
/* 0x000fc60000011603 */
/*0150*/ @!P1 LDS R5, [R4] ; /* 0x0000000004059984 */
/* 0x000e240000000800 */
/*0160*/ @!P1 IMNMX R2, R2, R5, PT ; /* 0x0000000502029217 */
/* 0x001fca0003800200 */
/*0170*/ @!P1 STS [R7.X4], R2 ; /* 0x0000000207009388 */
/* 0x0001e80000004800 */
/*0180*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0190*/ ISETP.NE.AND P1, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fda0003f25270 */
/*01a0*/ @P1 BRA 0x110 ; /* 0xffffff6000001947 */
/* 0x001fea000383ffff */
/*01b0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x001fea0003800000 */
/*01c0*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */
/* 0x000e220000000800 */
/*01d0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fc800078e00ff */
/*01e0*/ IMAD.WIDE.U32 R2, R6, R3, c[0x0][0x168] ; /* 0x00005a0006027625 */
/* 0x000fca00078e0003 */
/*01f0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101906 */
/*0200*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0210*/ BRA 0x210; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z8variancePiS_Pf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R7, SR_CTAID.X ; /* 0x0000000000077919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R2, R7, c[0x0][0x0], R9 ; /* 0x0000000007027a24 */
/* 0x001fca00078e0209 */
/*0060*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fcc00078e0203 */
/*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000602027981 */
/* 0x000ea2000c1e1900 */
/*0080*/ MOV R5, c[0x0][0x174] ; /* 0x00005d0000057a02 */
/* 0x000fe20000000f00 */
/*0090*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff047624 */
/* 0x000fe400078e00ff */
/*00a0*/ STS [R9.X4], R2 ; /* 0x0000000209007388 */
/* 0x004fe80000004800 */
/*00b0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*00c0*/ LDG.E R5, [R4.64] ; /* 0x0000000604057981 */
/* 0x000ea2000c1e1900 */
/*00d0*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe20000000800 */
/*00e0*/ ISETP.NE.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */
/* 0x000fe20003f05270 */
/*00f0*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fe20008011604 */
/*0100*/ LDS R0, [R9.X4] ; /* 0x0000000009007984 */
/* 0x000e2a0000004800 */
/*0110*/ ISETP.NE.AND P1, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fe2000bf25270 */
/*0120*/ I2F R0, R0 ; /* 0x0000000000007306 */
/* 0x001ea40000201400 */
/*0130*/ FADD R6, R0, -R5 ; /* 0x8000000500067221 */
/* 0x004fc80000000000 */
/*0140*/ FMUL R6, R6, R6 ; /* 0x0000000606067220 */
/* 0x000fcc0000400000 */
/*0150*/ F2I.TRUNC.NTZ R6, R6 ; /* 0x0000000600067305 */
/* 0x000e24000020f100 */
/*0160*/ STS [R9.X4], R6 ; /* 0x0000000609007388 */
/* 0x0011e80000004800 */
/*0170*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0180*/ @!P1 BRA 0x250 ; /* 0x000000c000009947 */
/* 0x000fea0003800000 */
/*0190*/ IMAD.SHL.U32 R0, R9, 0x4, RZ ; /* 0x0000000409007824 */
/* 0x001fe200078e00ff */
/*01a0*/ MOV R3, UR4 ; /* 0x0000000400037c02 */
/* 0x000fc80008000f00 */
/*01b0*/ ISETP.GE.U32.AND P1, PT, R9, R3, PT ; /* 0x000000030900720c */
/* 0x000fda0003f26070 */
/*01c0*/ @!P1 IMAD R2, R3, 0x4, R0 ; /* 0x0000000403029824 */
/* 0x000fe200078e0200 */
/*01d0*/ @!P1 LDS R4, [R9.X4] ; /* 0x0000000009049984 */
/* 0x000fe20000004800 */
/*01e0*/ SHF.R.U32.HI R3, RZ, 0x1, R3 ; /* 0x00000001ff037819 */
/* 0x000fc60000011603 */
/*01f0*/ @!P1 LDS R5, [R2] ; /* 0x0000000002059984 */
/* 0x000e240000000800 */
/*0200*/ @!P1 IADD3 R4, R4, R5, RZ ; /* 0x0000000504049210 */
/* 0x001fca0007ffe0ff */
/*0210*/ @!P1 STS [R9.X4], R4 ; /* 0x0000000409009388 */
/* 0x0001e80000004800 */
/*0220*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0230*/ ISETP.NE.AND P1, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fda0003f25270 */
/*0240*/ @P1 BRA 0x1b0 ; /* 0xffffff6000001947 */
/* 0x001fea000383ffff */
/*0250*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x001fea0003800000 */
/*0260*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */
/* 0x000e220000000800 */
/*0270*/ IMAD.MOV.U32 R2, RZ, RZ, 0x4 ; /* 0x00000004ff027424 */
/* 0x000fc800078e00ff */
/*0280*/ IMAD.WIDE.U32 R2, R7, R2, c[0x0][0x168] ; /* 0x00005a0007027625 */
/* 0x000fca00078e0002 */
/*0290*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101906 */
/*02a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*02b0*/ BRA 0x2b0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0300*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0310*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0320*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0330*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0340*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0350*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z13max_reductionPiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R2, R6, c[0x0][0x0], R7 ; /* 0x0000000006027a24 */
/* 0x001fc800078e0207 */
/*0060*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fcc00078e0203 */
/*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000602027981 */
/* 0x000ea2000c1e1900 */
/*0080*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe20000000800 */
/*0090*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fe20003f05270 */
/*00a0*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fcc0008011604 */
/*00b0*/ ISETP.NE.AND P1, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fe2000bf25270 */
/*00c0*/ STS [R7.X4], R2 ; /* 0x0000000207007388 */
/* 0x0041e80000004800 */
/*00d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000ff00000010000 */
/*00e0*/ @!P1 BRA 0x1b0 ; /* 0x000000c000009947 */
/* 0x000fea0003800000 */
/*00f0*/ SHF.L.U32 R0, R7, 0x2, RZ ; /* 0x0000000207007819 */
/* 0x001fe200000006ff */
/*0100*/ IMAD.U32 R3, RZ, RZ, UR4 ; /* 0x00000004ff037e24 */
/* 0x000fca000f8e00ff */
/*0110*/ ISETP.GE.U32.AND P1, PT, R7, R3, PT ; /* 0x000000030700720c */
/* 0x000fda0003f26070 */
/*0120*/ @!P1 LEA R4, R3, R0, 0x2 ; /* 0x0000000003049211 */
/* 0x000fe200078e10ff */
/*0130*/ @!P1 LDS R2, [R7.X4] ; /* 0x0000000007029984 */
/* 0x000fe20000004800 */
/*0140*/ SHF.R.U32.HI R3, RZ, 0x1, R3 ; /* 0x00000001ff037819 */
/* 0x000fc60000011603 */
/*0150*/ @!P1 LDS R5, [R4] ; /* 0x0000000004059984 */
/* 0x000e240000000800 */
/*0160*/ @!P1 IMNMX R2, R2, R5, !PT ; /* 0x0000000502029217 */
/* 0x001fca0007800200 */
/*0170*/ @!P1 STS [R7.X4], R2 ; /* 0x0000000207009388 */
/* 0x0001e80000004800 */
/*0180*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0190*/ ISETP.NE.AND P1, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fda0003f25270 */
/*01a0*/ @P1 BRA 0x110 ; /* 0xffffff6000001947 */
/* 0x001fea000383ffff */
/*01b0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x001fea0003800000 */
/*01c0*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */
/* 0x000e220000000800 */
/*01d0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fc800078e00ff */
/*01e0*/ IMAD.WIDE.U32 R2, R6, R3, c[0x0][0x168] ; /* 0x00005a0006027625 */
/* 0x000fca00078e0003 */
/*01f0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101906 */
/*0200*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0210*/ BRA 0x210; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z13sum_reductionPiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R2, R6, c[0x0][0x0], R7 ; /* 0x0000000006027a24 */
/* 0x001fca00078e0207 */
/*0060*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fcc00078e0203 */
/*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000602027981 */
/* 0x000ea2000c1e1900 */
/*0080*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe20000000800 */
/*0090*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fe20003f05270 */
/*00a0*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fcc0008011604 */
/*00b0*/ ISETP.NE.AND P1, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fe2000bf25270 */
/*00c0*/ STS [R7.X4], R2 ; /* 0x0000000207007388 */
/* 0x0041e80000004800 */
/*00d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000ff00000010000 */
/*00e0*/ @!P1 BRA 0x1b0 ; /* 0x000000c000009947 */
/* 0x000fea0003800000 */
/*00f0*/ IMAD.SHL.U32 R0, R7, 0x4, RZ ; /* 0x0000000407007824 */
/* 0x001fe200078e00ff */
/*0100*/ MOV R3, UR4 ; /* 0x0000000400037c02 */
/* 0x000fc80008000f00 */
/*0110*/ ISETP.GE.U32.AND P1, PT, R7, R3, PT ; /* 0x000000030700720c */
/* 0x000fda0003f26070 */
/*0120*/ @!P1 IMAD R2, R3, 0x4, R0 ; /* 0x0000000403029824 */
/* 0x000fe200078e0200 */
/*0130*/ @!P1 LDS R4, [R7.X4] ; /* 0x0000000007049984 */
/* 0x000fe20000004800 */
/*0140*/ SHF.R.U32.HI R3, RZ, 0x1, R3 ; /* 0x00000001ff037819 */
/* 0x000fc60000011603 */
/*0150*/ @!P1 LDS R5, [R2] ; /* 0x0000000002059984 */
/* 0x000e240000000800 */
/*0160*/ @!P1 IADD3 R4, R4, R5, RZ ; /* 0x0000000504049210 */
/* 0x001fca0007ffe0ff */
/*0170*/ @!P1 STS [R7.X4], R4 ; /* 0x0000000407009388 */
/* 0x0001e80000004800 */
/*0180*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0190*/ ISETP.NE.AND P1, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fda0003f25270 */
/*01a0*/ @P1 BRA 0x110 ; /* 0xffffff6000001947 */
/* 0x001fea000383ffff */
/*01b0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x001fea0003800000 */
/*01c0*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */
/* 0x000e220000000800 */
/*01d0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fc800078e00ff */
/*01e0*/ IMAD.WIDE.U32 R2, R6, R3, c[0x0][0x168] ; /* 0x00005a0006027625 */
/* 0x000fca00078e0003 */
/*01f0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101906 */
/*0200*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0210*/ BRA 0x210; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include<bits/stdc++.h>
#include<device_launch_parameters.h>
#include<cuda_runtime.h>
using namespace std;
#define SIZE 256
#define SSIZE SIZE*4 // sizeof(int)
// Block-wise sum: each block adds up its blockDim.x elements of v in shared
// memory and writes the block total to v_r[blockIdx.x].
// Assumes blockDim.x is a power of two and the grid exactly covers v
// (there is no bounds guard on the global index).
__global__ void sum_reduction(int *v, int *v_r) {
    __shared__ int partial_sum[SSIZE];

    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    partial_sum[threadIdx.x] = v[gid];
    __syncthreads();

    // Tree reduction: halve the active span each pass; the barrier is kept
    // outside the conditional so every thread of the block reaches it.
    for (int stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
        if (threadIdx.x < stride) {
            partial_sum[threadIdx.x] = partial_sum[threadIdx.x]
                                     + partial_sum[threadIdx.x + stride];
        }
        __syncthreads();
    }

    // Thread 0 publishes the block result.
    if (threadIdx.x == 0) {
        v_r[blockIdx.x] = partial_sum[0];
    }
}
// Kernel: per-block maximum via tree reduction in shared memory.
// Assumes blockDim.x is a power of two and the grid exactly covers v;
// there is no bounds guard on tid. Result goes to v_r[blockIdx.x].
__global__ void max_reduction(int *v,int *v_r){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
// Halve the active span each pass; barrier sits outside the if so the whole
// block reaches it.
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] = max(partial_sum[threadIdx.x],partial_sum[threadIdx.x+s]);
}
__syncthreads();
}
// Thread 0 publishes the block's maximum.
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
// Kernel: per-block sum of squared deviations from *mean (variance numerator).
// One element per thread; assumes blockDim.x is a power of two and the grid
// exactly covers v — there is no bounds guard on tid.
__global__ void variance(int *v,int *v_r,float *mean){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
// NOTE(review): the squared deviation is computed in float (via *mean) and
// truncated back into the int shared array — fractional part is lost;
// presumably matches the int-accumulating host reference. Confirm.
partial_sum[threadIdx.x] = (partial_sum[threadIdx.x] - *mean) * (partial_sum[threadIdx.x] - *mean);
__syncthreads();
// Tree reduction; barrier outside the if so all threads reach it.
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] += partial_sum[threadIdx.x+s];
}
__syncthreads();
}
// Thread 0 publishes the block's partial sum.
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
// Kernel: per-block minimum via tree reduction in shared memory.
// Assumes blockDim.x is a power of two and the grid exactly covers v;
// there is no bounds guard on tid. Result goes to v_r[blockIdx.x].
__global__ void min_reduction(int *v,int *v_r){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
// Halve the active span each pass; barrier sits outside the if so the whole
// block reaches it.
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] = min(partial_sum[threadIdx.x],partial_sum[threadIdx.x+s]);
}
__syncthreads();
}
// Thread 0 publishes the block's minimum.
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
// Populate v[0..n-1] with pseudo-random ints in [0, 1000).
// Consumes the global rand() stream; call srand() first for reproducibility.
void inititialise(int* v, int n) {
    int i = 0;
    while (i < n) {
        v[i] = rand() % 1000;
        ++i;
    }
}
// Host driver: computes the variance numerator (sum of squared deviations
// from the mean) of n random ints on the CPU and on the GPU, then prints
// both results, the timings, and the speedup ratio.
int main(){
int n = SIZE*SIZE;
float elapsed_cpu, elapsed_gpu;
clock_t t1, t2;
int thread_block_size = SIZE;
int num_blocks = n / thread_block_size;
int *h_v,*d_v,*h_v_r,*d_v_r;
float *d_mean;
// NOTE(review): malloc/cudaMalloc/cudaMemcpy return codes are not checked.
h_v = (int*)malloc(n*sizeof(int));
cudaMalloc(&d_v,n*sizeof(int));
h_v_r = (int*)malloc(num_blocks*sizeof(int));
cudaMalloc(&d_v_r,num_blocks*sizeof(int));
cudaMalloc((void**)&d_mean,sizeof(float));
inititialise(h_v,n);
// "minimum" actually accumulates the SUM of the elements (misnamed).
int minimum = 0;
for(int i =0;i<n;i++){
minimum = minimum+h_v[i];
}
//cout<<minimum<<endl;
// Integer division before the float assignment truncates the mean.
float mean = minimum / n;
int var = 0;
t1 = clock();
// CPU reference: squared deviation computed in float, truncated to int on
// each assignment to var.
for(int i =0;i<n;i++){
var = var + (h_v[i]-mean)*(h_v[i]-mean);
}
cout<<var<<endl;
t2 = clock();
elapsed_cpu = ((float)t2 - (float)t1) / CLOCKS_PER_SEC * 1000; //cpu elapsed time in ms
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaMemcpy(d_v,h_v,n*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_mean,&mean,sizeof(float),cudaMemcpyHostToDevice);
// Stage 1: per-block partial sums of squared deviations.
variance<<<num_blocks,thread_block_size>>>(d_v,d_v_r,d_mean);
// Stage 2: a single block folds the num_blocks partials in place.
// NOTE(review): relies on num_blocks == thread_block_size (both 256 here);
// a different SIZE would break this second launch — confirm.
sum_reduction<<<1,thread_block_size>>>(d_v_r,d_v_r);
cudaMemcpy(h_v_r,d_v_r,thread_block_size*sizeof(int),cudaMemcpyDeviceToHost);
cout<<h_v_r[0]<<endl;
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_gpu, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cout<<elapsed_cpu<<endl;
cout<<elapsed_gpu<<endl;
cout<<"speedup"<<elapsed_cpu/elapsed_gpu<<endl;
return 0;
} | .file "tmpxft_00030b20_00000000-6_reduction.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB10863:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE10863:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z12inititialisePii
.type _Z12inititialisePii, @function
_Z12inititialisePii:
.LFB10859:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L8
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbx
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %rbp
.L5:
call rand@PLT
movslq %eax, %rdx
imulq $274877907, %rdx, %rdx
sarq $38, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
imull $1000, %edx, %edx
subl %edx, %eax
movl %eax, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L5
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L8:
.cfi_restore 3
.cfi_restore 6
ret
.cfi_endproc
.LFE10859:
.size _Z12inititialisePii, .-_Z12inititialisePii
.globl _Z35__device_stub__Z13sum_reductionPiS_PiS_
.type _Z35__device_stub__Z13sum_reductionPiS_PiS_, @function
_Z35__device_stub__Z13sum_reductionPiS_PiS_:
.LFB10885:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z13sum_reductionPiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE10885:
.size _Z35__device_stub__Z13sum_reductionPiS_PiS_, .-_Z35__device_stub__Z13sum_reductionPiS_PiS_
.globl _Z13sum_reductionPiS_
.type _Z13sum_reductionPiS_, @function
_Z13sum_reductionPiS_:
.LFB10886:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z13sum_reductionPiS_PiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE10886:
.size _Z13sum_reductionPiS_, .-_Z13sum_reductionPiS_
.globl _Z35__device_stub__Z13max_reductionPiS_PiS_
.type _Z35__device_stub__Z13max_reductionPiS_PiS_, @function
_Z35__device_stub__Z13max_reductionPiS_PiS_:
.LFB10887:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z13max_reductionPiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE10887:
.size _Z35__device_stub__Z13max_reductionPiS_PiS_, .-_Z35__device_stub__Z13max_reductionPiS_PiS_
.globl _Z13max_reductionPiS_
.type _Z13max_reductionPiS_, @function
_Z13max_reductionPiS_:
.LFB10888:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z13max_reductionPiS_PiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE10888:
.size _Z13max_reductionPiS_, .-_Z13max_reductionPiS_
.globl _Z31__device_stub__Z8variancePiS_PfPiS_Pf
.type _Z31__device_stub__Z8variancePiS_PfPiS_Pf, @function
_Z31__device_stub__Z8variancePiS_PfPiS_Pf:
.LFB10889:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z8variancePiS_Pf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE10889:
.size _Z31__device_stub__Z8variancePiS_PfPiS_Pf, .-_Z31__device_stub__Z8variancePiS_PfPiS_Pf
.globl _Z8variancePiS_Pf
.type _Z8variancePiS_Pf, @function
_Z8variancePiS_Pf:
.LFB10890:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z8variancePiS_PfPiS_Pf
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE10890:
.size _Z8variancePiS_Pf, .-_Z8variancePiS_Pf
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string "speedup"
.text
.globl main
.type main, @function
main:
.LFB10860:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $96, %rsp
.cfi_def_cfa_offset 144
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
movl $262144, %edi
call malloc@PLT
movq %rax, %r12
leaq 24(%rsp), %rdi
movl $262144, %esi
call cudaMalloc@PLT
movl $1024, %edi
call malloc@PLT
movq %rax, %r13
leaq 32(%rsp), %rdi
movl $1024, %esi
call cudaMalloc@PLT
leaq 40(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movl $65536, %esi
movq %r12, %rdi
call _Z12inititialisePii
movq %r12, %rbx
leaq 262144(%r12), %rbp
movq %r12, %rax
movl $0, %ecx
.L36:
addl (%rax), %ecx
addq $4, %rax
cmpq %rbp, %rax
jne .L36
leal 65535(%rcx), %eax
testl %ecx, %ecx
cmovns %ecx, %eax
sarl $16, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, 20(%rsp)
call clock@PLT
movq %rax, %r14
movss 20(%rsp), %xmm2
movl $0, %esi
.L37:
pxor %xmm0, %xmm0
cvtsi2ssl (%rbx), %xmm0
subss %xmm2, %xmm0
pxor %xmm1, %xmm1
cvtsi2ssl %esi, %xmm1
mulss %xmm0, %xmm0
addss %xmm1, %xmm0
cvttss2sil %xmm0, %esi
addq $4, %rbx
cmpq %rbp, %rbx
jne .L37
leaq _ZSt4cout(%rip), %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
call clock@PLT
pxor %xmm0, %xmm0
cvtsi2ssq %rax, %xmm0
pxor %xmm1, %xmm1
cvtsi2ssq %r14, %xmm1
subss %xmm1, %xmm0
divss .LC0(%rip), %xmm0
mulss .LC1(%rip), %xmm0
movss %xmm0, 12(%rsp)
leaq 48(%rsp), %rdi
call cudaEventCreate@PLT
leaq 56(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 48(%rsp), %rdi
call cudaEventRecord@PLT
movl $1, %ecx
movl $262144, %edx
movq %r12, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
leaq 20(%rsp), %rsi
movl $1, %ecx
movl $4, %edx
movq 40(%rsp), %rdi
call cudaMemcpy@PLT
movl $256, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $256, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 76(%rsp), %rdx
movl $1, %ecx
movq 64(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L44
.L38:
movl $256, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 76(%rsp), %rdx
movl $1, %ecx
movq 64(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L45
.L39:
movl $2, %ecx
movl $1024, %edx
movq 32(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movl 0(%r13), %esi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $0, %esi
movq 56(%rsp), %rdi
call cudaEventRecord@PLT
movq 56(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 76(%rsp), %rdi
movq 56(%rsp), %rdx
movq 48(%rsp), %rsi
call cudaEventElapsedTime@PLT
movq 48(%rsp), %rdi
call cudaEventDestroy@PLT
movq 56(%rsp), %rdi
call cudaEventDestroy@PLT
pxor %xmm0, %xmm0
cvtss2sd 12(%rsp), %xmm0
movq %rbx, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
pxor %xmm0, %xmm0
cvtss2sd 76(%rsp), %xmm0
movq %rbx, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC2(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movss 12(%rsp), %xmm0
divss 76(%rsp), %xmm0
cvtss2sd %xmm0, %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L46
movl $0, %eax
addq $96, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L44:
.cfi_restore_state
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z31__device_stub__Z8variancePiS_PfPiS_Pf
jmp .L38
.L45:
movq 32(%rsp), %rdi
movq %rdi, %rsi
call _Z35__device_stub__Z13sum_reductionPiS_PiS_
jmp .L39
.L46:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE10860:
.size main, .-main
.globl _Z35__device_stub__Z13min_reductionPiS_PiS_
.type _Z35__device_stub__Z13min_reductionPiS_PiS_, @function
_Z35__device_stub__Z13min_reductionPiS_PiS_:
.LFB10891:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L51
.L47:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L52
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L51:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z13min_reductionPiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L47
.L52:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE10891:
.size _Z35__device_stub__Z13min_reductionPiS_PiS_, .-_Z35__device_stub__Z13min_reductionPiS_PiS_
.globl _Z13min_reductionPiS_
.type _Z13min_reductionPiS_, @function
_Z13min_reductionPiS_:
.LFB10892:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z13min_reductionPiS_PiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE10892:
.size _Z13min_reductionPiS_, .-_Z13min_reductionPiS_
.section .rodata.str1.1
.LC3:
.string "_Z13min_reductionPiS_"
.LC4:
.string "_Z8variancePiS_Pf"
.LC5:
.string "_Z13max_reductionPiS_"
.LC6:
.string "_Z13sum_reductionPiS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB10894:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z13min_reductionPiS_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z8variancePiS_Pf(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z13max_reductionPiS_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z13sum_reductionPiS_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE10894:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1232348160
.align 4
.LC1:
.long 1148846080
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include<bits/stdc++.h>
#include<device_launch_parameters.h>
#include<cuda_runtime.h>
using namespace std;
#define SIZE 256
#define SSIZE SIZE*4 // sizeof(int)
__global__ void sum_reduction(int *v,int *v_r){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] += partial_sum[threadIdx.x+s];
}
__syncthreads();
}
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
__global__ void max_reduction(int *v,int *v_r){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] = max(partial_sum[threadIdx.x],partial_sum[threadIdx.x+s]);
}
__syncthreads();
}
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
__global__ void variance(int *v,int *v_r,float *mean){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
partial_sum[threadIdx.x] = (partial_sum[threadIdx.x] - *mean) * (partial_sum[threadIdx.x] - *mean);
__syncthreads();
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] += partial_sum[threadIdx.x+s];
}
__syncthreads();
}
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
__global__ void min_reduction(int *v,int *v_r){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] = min(partial_sum[threadIdx.x],partial_sum[threadIdx.x+s]);
}
__syncthreads();
}
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
void inititialise(int* v,int n){
for(int i =0;i<n;i++){
v[i]= rand()%1000;
}
}
int main(){
int n = SIZE*SIZE;
float elapsed_cpu, elapsed_gpu;
clock_t t1, t2;
int thread_block_size = SIZE;
int num_blocks = n / thread_block_size;
int *h_v,*d_v,*h_v_r,*d_v_r;
float *d_mean;
h_v = (int*)malloc(n*sizeof(int));
cudaMalloc(&d_v,n*sizeof(int));
h_v_r = (int*)malloc(num_blocks*sizeof(int));
cudaMalloc(&d_v_r,num_blocks*sizeof(int));
cudaMalloc((void**)&d_mean,sizeof(float));
inititialise(h_v,n);
int minimum = 0;
for(int i =0;i<n;i++){
minimum = minimum+h_v[i];
}
//cout<<minimum<<endl;
float mean = minimum / n;
int var = 0;
t1 = clock();
for(int i =0;i<n;i++){
var = var + (h_v[i]-mean)*(h_v[i]-mean);
}
cout<<var<<endl;
t2 = clock();
elapsed_cpu = ((float)t2 - (float)t1) / CLOCKS_PER_SEC * 1000; //cpu elapsed time in ms
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaMemcpy(d_v,h_v,n*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_mean,&mean,sizeof(float),cudaMemcpyHostToDevice);
variance<<<num_blocks,thread_block_size>>>(d_v,d_v_r,d_mean);
sum_reduction<<<1,thread_block_size>>>(d_v_r,d_v_r);
cudaMemcpy(h_v_r,d_v_r,thread_block_size*sizeof(int),cudaMemcpyDeviceToHost);
cout<<h_v_r[0]<<endl;
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_gpu, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cout<<elapsed_cpu<<endl;
cout<<elapsed_gpu<<endl;
cout<<"speedup"<<elapsed_cpu/elapsed_gpu<<endl;
return 0;
} | #include<bits/stdc++.h>
#include<hip/hip_runtime.h>
using namespace std;
#define SIZE 256
#define SSIZE SIZE*4 // sizeof(int)
__global__ void sum_reduction(int *v,int *v_r){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] += partial_sum[threadIdx.x+s];
}
__syncthreads();
}
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
__global__ void max_reduction(int *v,int *v_r){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] = max(partial_sum[threadIdx.x],partial_sum[threadIdx.x+s]);
}
__syncthreads();
}
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
__global__ void variance(int *v,int *v_r,float *mean){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
partial_sum[threadIdx.x] = (partial_sum[threadIdx.x] - *mean) * (partial_sum[threadIdx.x] - *mean);
__syncthreads();
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] += partial_sum[threadIdx.x+s];
}
__syncthreads();
}
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
__global__ void min_reduction(int *v,int *v_r){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] = min(partial_sum[threadIdx.x],partial_sum[threadIdx.x+s]);
}
__syncthreads();
}
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
void inititialise(int* v,int n){
for(int i =0;i<n;i++){
v[i]= rand()%1000;
}
}
int main(){
int n = SIZE*SIZE;
float elapsed_cpu, elapsed_gpu;
clock_t t1, t2;
int thread_block_size = SIZE;
int num_blocks = n / thread_block_size;
int *h_v,*d_v,*h_v_r,*d_v_r;
float *d_mean;
h_v = (int*)malloc(n*sizeof(int));
hipMalloc(&d_v,n*sizeof(int));
h_v_r = (int*)malloc(num_blocks*sizeof(int));
hipMalloc(&d_v_r,num_blocks*sizeof(int));
hipMalloc((void**)&d_mean,sizeof(float));
inititialise(h_v,n);
int minimum = 0;
for(int i =0;i<n;i++){
minimum = minimum+h_v[i];
}
//cout<<minimum<<endl;
float mean = minimum / n;
int var = 0;
t1 = clock();
for(int i =0;i<n;i++){
var = var + (h_v[i]-mean)*(h_v[i]-mean);
}
cout<<var<<endl;
t2 = clock();
elapsed_cpu = ((float)t2 - (float)t1) / CLOCKS_PER_SEC * 1000; //cpu elapsed time in ms
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipMemcpy(d_v,h_v,n*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(d_mean,&mean,sizeof(float),hipMemcpyHostToDevice);
variance<<<num_blocks,thread_block_size>>>(d_v,d_v_r,d_mean);
sum_reduction<<<1,thread_block_size>>>(d_v_r,d_v_r);
hipMemcpy(h_v_r,d_v_r,thread_block_size*sizeof(int),hipMemcpyDeviceToHost);
cout<<h_v_r[0]<<endl;
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_gpu, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
cout<<elapsed_cpu<<endl;
cout<<elapsed_gpu<<endl;
cout<<"speedup"<<elapsed_cpu/elapsed_gpu<<endl;
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include<bits/stdc++.h>
#include<hip/hip_runtime.h>
using namespace std;
#define SIZE 256
#define SSIZE SIZE*4 // sizeof(int)
__global__ void sum_reduction(int *v,int *v_r){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] += partial_sum[threadIdx.x+s];
}
__syncthreads();
}
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
__global__ void max_reduction(int *v,int *v_r){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] = max(partial_sum[threadIdx.x],partial_sum[threadIdx.x+s]);
}
__syncthreads();
}
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
__global__ void variance(int *v,int *v_r,float *mean){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
partial_sum[threadIdx.x] = (partial_sum[threadIdx.x] - *mean) * (partial_sum[threadIdx.x] - *mean);
__syncthreads();
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] += partial_sum[threadIdx.x+s];
}
__syncthreads();
}
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
__global__ void min_reduction(int *v,int *v_r){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] = min(partial_sum[threadIdx.x],partial_sum[threadIdx.x+s]);
}
__syncthreads();
}
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
void inititialise(int* v,int n){
for(int i =0;i<n;i++){
v[i]= rand()%1000;
}
}
int main(){
int n = SIZE*SIZE;
float elapsed_cpu, elapsed_gpu;
clock_t t1, t2;
int thread_block_size = SIZE;
int num_blocks = n / thread_block_size;
int *h_v,*d_v,*h_v_r,*d_v_r;
float *d_mean;
h_v = (int*)malloc(n*sizeof(int));
hipMalloc(&d_v,n*sizeof(int));
h_v_r = (int*)malloc(num_blocks*sizeof(int));
hipMalloc(&d_v_r,num_blocks*sizeof(int));
hipMalloc((void**)&d_mean,sizeof(float));
inititialise(h_v,n);
int minimum = 0;
for(int i =0;i<n;i++){
minimum = minimum+h_v[i];
}
//cout<<minimum<<endl;
float mean = minimum / n;
int var = 0;
t1 = clock();
for(int i =0;i<n;i++){
var = var + (h_v[i]-mean)*(h_v[i]-mean);
}
cout<<var<<endl;
t2 = clock();
elapsed_cpu = ((float)t2 - (float)t1) / CLOCKS_PER_SEC * 1000; //cpu elapsed time in ms
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipMemcpy(d_v,h_v,n*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(d_mean,&mean,sizeof(float),hipMemcpyHostToDevice);
variance<<<num_blocks,thread_block_size>>>(d_v,d_v_r,d_mean);
sum_reduction<<<1,thread_block_size>>>(d_v_r,d_v_r);
hipMemcpy(h_v_r,d_v_r,thread_block_size*sizeof(int),hipMemcpyDeviceToHost);
cout<<h_v_r[0]<<endl;
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_gpu, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
cout<<elapsed_cpu<<endl;
cout<<elapsed_gpu<<endl;
cout<<"speedup"<<elapsed_cpu/elapsed_gpu<<endl;
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13sum_reductionPiS_
.globl _Z13sum_reductionPiS_
.p2align 8
.type _Z13sum_reductionPiS_,@function
_Z13sum_reductionPiS_:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x1c
s_load_b64 s[4:5], s[0:1], 0x0
s_mov_b32 s2, s15
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s2, s3, v[0:1]
s_cmp_lt_u32 s3, 2
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
v_add_co_u32 v1, vcc_lo, s4, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s5, v2, vcc_lo
global_load_b32 v2, v[1:2], off
v_lshlrev_b32_e32 v1, 2, v0
s_waitcnt vmcnt(0)
ds_store_b32 v1, v2
s_waitcnt lgkmcnt(0)
s_barrier
s_branch .LBB0_2
.p2align 6
.LBB0_1:
s_or_b32 exec_lo, exec_lo, s5
s_waitcnt lgkmcnt(0)
s_barrier
s_cmp_lt_u32 s3, 4
s_mov_b32 s3, s4
.LBB0_2:
buffer_gl0_inv
s_cbranch_scc1 .LBB0_5
s_lshr_b32 s4, s3, 1
s_mov_b32 s5, exec_lo
v_cmpx_gt_u32_e64 s4, v0
s_cbranch_execz .LBB0_1
v_add_lshl_u32 v2, s4, v0, 2
ds_load_b32 v2, v2
ds_load_b32 v3, v1
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
ds_store_b32 v1, v2
s_branch .LBB0_1
.LBB0_5:
s_mov_b32 s3, 0
s_mov_b32 s4, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB0_7
v_mov_b32_e32 v0, 0
s_load_b64 s[0:1], s[0:1], 0x8
s_lshl_b64 s[2:3], s[2:3], 2
ds_load_b32 v1, v0
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v0, v1, s[0:1]
.LBB0_7:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13sum_reductionPiS_
.amdhsa_group_segment_fixed_size 4096
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13sum_reductionPiS_, .Lfunc_end0-_Z13sum_reductionPiS_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z13max_reductionPiS_
.globl _Z13max_reductionPiS_
.p2align 8
.type _Z13max_reductionPiS_,@function
_Z13max_reductionPiS_:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x1c
s_load_b64 s[4:5], s[0:1], 0x0
s_mov_b32 s2, s15
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s2, s3, v[0:1]
s_cmp_lt_u32 s3, 2
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
v_add_co_u32 v1, vcc_lo, s4, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s5, v2, vcc_lo
global_load_b32 v2, v[1:2], off
v_lshlrev_b32_e32 v1, 2, v0
s_waitcnt vmcnt(0)
ds_store_b32 v1, v2
s_waitcnt lgkmcnt(0)
s_barrier
s_branch .LBB1_2
.p2align 6
.LBB1_1:
s_or_b32 exec_lo, exec_lo, s5
s_waitcnt lgkmcnt(0)
s_barrier
s_cmp_lt_u32 s3, 4
s_mov_b32 s3, s4
.LBB1_2:
buffer_gl0_inv
s_cbranch_scc1 .LBB1_5
s_lshr_b32 s4, s3, 1
s_mov_b32 s5, exec_lo
v_cmpx_gt_u32_e64 s4, v0
s_cbranch_execz .LBB1_1
v_add_lshl_u32 v2, s4, v0, 2
ds_load_b32 v3, v1
ds_load_b32 v2, v2
s_waitcnt lgkmcnt(0)
v_max_i32_e32 v2, v3, v2
ds_store_b32 v1, v2
s_branch .LBB1_1
.LBB1_5:
s_mov_b32 s3, 0
s_mov_b32 s4, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB1_7
v_mov_b32_e32 v0, 0
s_load_b64 s[0:1], s[0:1], 0x8
s_lshl_b64 s[2:3], s[2:3], 2
ds_load_b32 v1, v0
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v0, v1, s[0:1]
.LBB1_7:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13max_reductionPiS_
.amdhsa_group_segment_fixed_size 4096
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z13max_reductionPiS_, .Lfunc_end1-_Z13max_reductionPiS_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z8variancePiS_Pf
.globl _Z8variancePiS_Pf
.p2align 8
.type _Z8variancePiS_Pf,@function
_Z8variancePiS_Pf:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x24
s_load_b64 s[6:7], s[0:1], 0x10
s_mov_b32 s2, s15
s_load_b64 s[4:5], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s2, s3, v[0:1]
s_cmp_lt_u32 s3, 2
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
v_add_co_u32 v1, vcc_lo, s4, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s5, v2, vcc_lo
global_load_b32 v2, v[1:2], off
v_lshlrev_b32_e32 v1, 2, v0
s_waitcnt vmcnt(0)
ds_store_b32 v1, v2
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
ds_load_b32 v2, v1
s_load_b32 s4, s[6:7], 0x0
s_waitcnt lgkmcnt(0)
v_cvt_f32_i32_e32 v2, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_subrev_f32_e32 v2, s4, v2
v_mul_f32_e32 v2, v2, v2
s_delay_alu instid0(VALU_DEP_1)
v_cvt_i32_f32_e32 v2, v2
ds_store_b32 v1, v2
s_waitcnt lgkmcnt(0)
s_barrier
s_branch .LBB2_2
.p2align 6
.LBB2_1:
s_or_b32 exec_lo, exec_lo, s5
s_waitcnt lgkmcnt(0)
s_barrier
s_cmp_lt_u32 s3, 4
s_mov_b32 s3, s4
.LBB2_2:
buffer_gl0_inv
s_cbranch_scc1 .LBB2_5
s_lshr_b32 s4, s3, 1
s_mov_b32 s5, exec_lo
v_cmpx_gt_u32_e64 s4, v0
s_cbranch_execz .LBB2_1
v_add_lshl_u32 v2, s4, v0, 2
ds_load_b32 v2, v2
ds_load_b32 v3, v1
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
ds_store_b32 v1, v2
s_branch .LBB2_1
.LBB2_5:
s_mov_b32 s3, 0
s_mov_b32 s4, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB2_7
v_mov_b32_e32 v0, 0
s_load_b64 s[0:1], s[0:1], 0x8
s_lshl_b64 s[2:3], s[2:3], 2
ds_load_b32 v1, v0
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v0, v1, s[0:1]
.LBB2_7:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8variancePiS_Pf
.amdhsa_group_segment_fixed_size 4096
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z8variancePiS_Pf, .Lfunc_end2-_Z8variancePiS_Pf
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z13min_reductionPiS_
.globl _Z13min_reductionPiS_
.p2align 8
.type _Z13min_reductionPiS_,@function
_Z13min_reductionPiS_:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x1c
s_load_b64 s[4:5], s[0:1], 0x0
s_mov_b32 s2, s15
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s2, s3, v[0:1]
s_cmp_lt_u32 s3, 2
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
v_add_co_u32 v1, vcc_lo, s4, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s5, v2, vcc_lo
global_load_b32 v2, v[1:2], off
v_lshlrev_b32_e32 v1, 2, v0
s_waitcnt vmcnt(0)
ds_store_b32 v1, v2
s_waitcnt lgkmcnt(0)
s_barrier
s_branch .LBB3_2
.p2align 6
.LBB3_1:
s_or_b32 exec_lo, exec_lo, s5
s_waitcnt lgkmcnt(0)
s_barrier
s_cmp_lt_u32 s3, 4
s_mov_b32 s3, s4
.LBB3_2:
buffer_gl0_inv
s_cbranch_scc1 .LBB3_5
s_lshr_b32 s4, s3, 1
s_mov_b32 s5, exec_lo
v_cmpx_gt_u32_e64 s4, v0
s_cbranch_execz .LBB3_1
v_add_lshl_u32 v2, s4, v0, 2
ds_load_b32 v3, v1
ds_load_b32 v2, v2
s_waitcnt lgkmcnt(0)
v_min_i32_e32 v2, v3, v2
ds_store_b32 v1, v2
s_branch .LBB3_1
.LBB3_5:
s_mov_b32 s3, 0
s_mov_b32 s4, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB3_7
v_mov_b32_e32 v0, 0
s_load_b64 s[0:1], s[0:1], 0x8
s_lshl_b64 s[2:3], s[2:3], 2
ds_load_b32 v1, v0
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v0, v1, s[0:1]
.LBB3_7:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13min_reductionPiS_
.amdhsa_group_segment_fixed_size 4096
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end3:
.size _Z13min_reductionPiS_, .Lfunc_end3-_Z13min_reductionPiS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 4096
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13sum_reductionPiS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13sum_reductionPiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 4096
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13max_reductionPiS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13max_reductionPiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 4096
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8variancePiS_Pf
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z8variancePiS_Pf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 4096
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13min_reductionPiS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13min_reductionPiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include<bits/stdc++.h>
#include<hip/hip_runtime.h>
using namespace std;
#define SIZE 256
#define SSIZE SIZE*4 // sizeof(int)
__global__ void sum_reduction(int *v,int *v_r){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] += partial_sum[threadIdx.x+s];
}
__syncthreads();
}
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
__global__ void max_reduction(int *v,int *v_r){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] = max(partial_sum[threadIdx.x],partial_sum[threadIdx.x+s]);
}
__syncthreads();
}
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
__global__ void variance(int *v,int *v_r,float *mean){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
partial_sum[threadIdx.x] = (partial_sum[threadIdx.x] - *mean) * (partial_sum[threadIdx.x] - *mean);
__syncthreads();
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] += partial_sum[threadIdx.x+s];
}
__syncthreads();
}
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
__global__ void min_reduction(int *v,int *v_r){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] = min(partial_sum[threadIdx.x],partial_sum[threadIdx.x+s]);
}
__syncthreads();
}
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
void inititialise(int* v,int n){
for(int i =0;i<n;i++){
v[i]= rand()%1000;
}
}
int main(){
    // Problem: compute the (int-truncated) sum of squared deviations of
    // n = SIZE*SIZE random ints, once on the CPU and once on the GPU
    // (per-block partial variance + a second sum_reduction pass), and
    // report both times and the speedup.
    int n = SIZE*SIZE;
    float elapsed_cpu, elapsed_gpu;
    clock_t t1, t2;
    int thread_block_size = SIZE;
    int num_blocks = n / thread_block_size;   // == SIZE, so one pass of
                                              // sum_reduction<<<1,SIZE>>> suffices
    int *h_v, *d_v, *h_v_r, *d_v_r;
    float *d_mean;
    h_v = (int*)malloc(n*sizeof(int));
    hipMalloc(&d_v, n*sizeof(int));
    h_v_r = (int*)malloc(num_blocks*sizeof(int));
    hipMalloc(&d_v_r, num_blocks*sizeof(int));
    hipMalloc((void**)&d_mean, sizeof(float));

    inititialise(h_v, n);

    // Host-side mean. Widened accumulator avoids int overflow for large n;
    // FIX: the original divided int by int, truncating the mean to an
    // integer before use — cast keeps the fractional part.
    long long total = 0;
    for(int i = 0; i < n; i++){
        total += h_v[i];
    }
    float mean = (float)total / n;

    // CPU reference: int accumulator, so each float square is truncated on
    // accumulation (the GPU variance kernel truncates per element).
    int var = 0;
    t1 = clock();
    for(int i = 0; i < n; i++){
        var = var + (h_v[i]-mean)*(h_v[i]-mean);
    }
    t2 = clock();   // FIX: stop the clock before printing, not after
    cout<<var<<endl;
    elapsed_cpu = ((float)t2 - (float)t1) / CLOCKS_PER_SEC * 1000; // ms

    // GPU path, timed with events (includes the H2D copies).
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);
    hipMemcpy(d_v, h_v, n*sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_mean, &mean, sizeof(float), hipMemcpyHostToDevice);
    variance<<<num_blocks,thread_block_size>>>(d_v, d_v_r, d_mean);
    sum_reduction<<<1,thread_block_size>>>(d_v_r, d_v_r);
    // Blocking copy also synchronizes before h_v_r[0] is read.
    // thread_block_size == num_blocks here, so the copy size matches h_v_r.
    hipMemcpy(h_v_r, d_v_r, thread_block_size*sizeof(int), hipMemcpyDeviceToHost);
    hipEventRecord(stop, 0);   // FIX: record stop before printing the result
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsed_gpu, start, stop);
    cout<<h_v_r[0]<<endl;

    // FIX: release events and all host/device allocations (original leaked).
    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipFree(d_v);
    hipFree(d_v_r);
    hipFree(d_mean);
    free(h_v);
    free(h_v_r);

    cout<<elapsed_cpu<<endl;
    cout<<elapsed_gpu<<endl;
    cout<<"speedup"<<elapsed_cpu/elapsed_gpu<<endl;
    return 0;
}
.file "reduction.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z28__device_stub__sum_reductionPiS_ # -- Begin function _Z28__device_stub__sum_reductionPiS_
.p2align 4, 0x90
.type _Z28__device_stub__sum_reductionPiS_,@function
_Z28__device_stub__sum_reductionPiS_: # @_Z28__device_stub__sum_reductionPiS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z13sum_reductionPiS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z28__device_stub__sum_reductionPiS_, .Lfunc_end0-_Z28__device_stub__sum_reductionPiS_
.cfi_endproc
# -- End function
.globl _Z28__device_stub__max_reductionPiS_ # -- Begin function _Z28__device_stub__max_reductionPiS_
.p2align 4, 0x90
.type _Z28__device_stub__max_reductionPiS_,@function
_Z28__device_stub__max_reductionPiS_: # @_Z28__device_stub__max_reductionPiS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z13max_reductionPiS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end1:
.size _Z28__device_stub__max_reductionPiS_, .Lfunc_end1-_Z28__device_stub__max_reductionPiS_
.cfi_endproc
# -- End function
.globl _Z23__device_stub__variancePiS_Pf # -- Begin function _Z23__device_stub__variancePiS_Pf
.p2align 4, 0x90
.type _Z23__device_stub__variancePiS_Pf,@function
_Z23__device_stub__variancePiS_Pf: # @_Z23__device_stub__variancePiS_Pf
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8variancePiS_Pf, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end2:
.size _Z23__device_stub__variancePiS_Pf, .Lfunc_end2-_Z23__device_stub__variancePiS_Pf
.cfi_endproc
# -- End function
.globl _Z28__device_stub__min_reductionPiS_ # -- Begin function _Z28__device_stub__min_reductionPiS_
.p2align 4, 0x90
.type _Z28__device_stub__min_reductionPiS_,@function
_Z28__device_stub__min_reductionPiS_: # @_Z28__device_stub__min_reductionPiS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z13min_reductionPiS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end3:
.size _Z28__device_stub__min_reductionPiS_, .Lfunc_end3-_Z28__device_stub__min_reductionPiS_
.cfi_endproc
# -- End function
.globl _Z12inititialisePii # -- Begin function _Z12inititialisePii
.p2align 4, 0x90
.type _Z12inititialisePii,@function
_Z12inititialisePii: # @_Z12inititialisePii
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB4_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl %esi, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB4_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $274877907, %rax, %rcx # imm = 0x10624DD3
movq %rcx, %rdx
shrq $63, %rdx
sarq $38, %rcx
addl %edx, %ecx
imull $1000, %ecx, %ecx # imm = 0x3E8
subl %ecx, %eax
movl %eax, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB4_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB4_4: # %._crit_edge
retq
.Lfunc_end4:
.size _Z12inititialisePii, .Lfunc_end4-_Z12inititialisePii
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI5_0:
.long 0x49742400 # float 1.0E+6
.LCPI5_1:
.long 0x447a0000 # float 1000
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $144, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $262144, %edi # imm = 0x40000
callq malloc
movq %rax, %r15
leaq 128(%rsp), %rdi
movl $262144, %esi # imm = 0x40000
callq hipMalloc
movl $1024, %edi # imm = 0x400
callq malloc
movq %rax, %rbx
leaq 24(%rsp), %rdi
movl $1024, %esi # imm = 0x400
callq hipMalloc
leaq 120(%rsp), %rdi
movl $4, %esi
callq hipMalloc
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB5_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $274877907, %rax, %rcx # imm = 0x10624DD3
movq %rcx, %rdx
shrq $63, %rdx
sarq $38, %rcx
addl %edx, %ecx
imull $1000, %ecx, %ecx # imm = 0x3E8
subl %ecx, %eax
movl %eax, (%r15,%r14,4)
incq %r14
cmpq $65536, %r14 # imm = 0x10000
jne .LBB5_1
# %bb.2: # %_Z12inititialisePii.exit.preheader
xorl %ecx, %ecx
xorl %eax, %eax
.p2align 4, 0x90
.LBB5_3: # %_Z12inititialisePii.exit
# =>This Inner Loop Header: Depth=1
addl (%r15,%rcx,4), %eax
incq %rcx
cmpq $65536, %rcx # imm = 0x10000
jne .LBB5_3
# %bb.4:
leal 65535(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $16, %ecx
cvtsi2ss %ecx, %xmm0
movss %xmm0, 4(%rsp)
callq clock
movq %rax, %r14
xorl %eax, %eax
movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
xorl %esi, %esi
.p2align 4, 0x90
.LBB5_5: # =>This Inner Loop Header: Depth=1
xorps %xmm1, %xmm1
cvtsi2ss %esi, %xmm1
xorps %xmm2, %xmm2
cvtsi2ssl (%r15,%rax,4), %xmm2
subss %xmm0, %xmm2
mulss %xmm2, %xmm2
addss %xmm1, %xmm2
cvttss2si %xmm2, %esi
incq %rax
cmpq $65536, %rax # imm = 0x10000
jne .LBB5_5
# %bb.6:
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r12
testq %r12, %r12
je .LBB5_31
# %bb.7: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%r12)
je .LBB5_9
# %bb.8:
movzbl 67(%r12), %ecx
jmp .LBB5_10
.LBB5_9:
movq %r12, %rdi
movq %rax, %r13
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r13, %rax
.LBB5_10: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movabsq $4294967552, %r13 # imm = 0x100000100
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
callq clock
movq %rax, %r12
leaq 16(%rsp), %rdi
callq hipEventCreate
leaq 8(%rsp), %rdi
callq hipEventCreate
movq 16(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 128(%rsp), %rdi
movl $262144, %edx # imm = 0x40000
movq %r15, %rsi
movl $1, %ecx
callq hipMemcpy
movq 120(%rsp), %rdi
leaq 4(%rsp), %rsi
movl $4, %edx
movl $1, %ecx
callq hipMemcpy
movq %r13, %rdi
movl $1, %esi
movq %r13, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_12
# %bb.11:
movq 128(%rsp), %rax
movq 24(%rsp), %rcx
movq 120(%rsp), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 40(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 136(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z8variancePiS_Pf, %edi
pushq 136(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_12:
leaq -255(%r13), %rdi
movl $1, %esi
movq %r13, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_14
# %bb.13:
movq 24(%rsp), %rax
movq %rax, 88(%rsp)
movq %rax, 80(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z13sum_reductionPiS_, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_14:
movq 24(%rsp), %rsi
movl $1024, %edx # imm = 0x400
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movl (%rbx), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB5_31
# %bb.15: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i53
cvtsi2ss %r12, %xmm1
cvtsi2ss %r14, %xmm0
subss %xmm0, %xmm1
divss .LCPI5_0(%rip), %xmm1
mulss .LCPI5_1(%rip), %xmm1
movss %xmm1, (%rsp) # 4-byte Spill
cmpb $0, 56(%rbx)
je .LBB5_17
# %bb.16:
movzbl 67(%rbx), %ecx
jmp .LBB5_18
.LBB5_17:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB5_18: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit56
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 16(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 96(%rsp), %rdi
callq hipEventElapsedTime
movq 16(%rsp), %rdi
callq hipEventDestroy
movq 8(%rsp), %rdi
callq hipEventDestroy
movss (%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB5_31
# %bb.19: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i58
cmpb $0, 56(%rbx)
je .LBB5_21
# %bb.20:
movzbl 67(%rbx), %ecx
jmp .LBB5_22
.LBB5_21:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB5_22: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit61
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movss 96(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB5_31
# %bb.23: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i63
cmpb $0, 56(%rbx)
je .LBB5_25
# %bb.24:
movzbl 67(%rbx), %ecx
jmp .LBB5_26
.LBB5_25:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB5_26: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit66
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $7, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movss (%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
divss 96(%rsp), %xmm0
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB5_31
# %bb.27: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i68
cmpb $0, 56(%rbx)
je .LBB5_29
# %bb.28:
movzbl 67(%rbx), %ecx
jmp .LBB5_30
.LBB5_29:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB5_30: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit71
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
xorl %eax, %eax
addq $144, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB5_31:
.cfi_def_cfa_offset 192
callq _ZSt16__throw_bad_castv
.Lfunc_end5:
.size main, .Lfunc_end5-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB6_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13sum_reductionPiS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13max_reductionPiS_, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8variancePiS_Pf, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13min_reductionPiS_, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end6:
.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB7_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB7_2:
retq
.Lfunc_end7:
.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z13sum_reductionPiS_,@object # @_Z13sum_reductionPiS_
.section .rodata,"a",@progbits
.globl _Z13sum_reductionPiS_
.p2align 3, 0x0
_Z13sum_reductionPiS_:
.quad _Z28__device_stub__sum_reductionPiS_
.size _Z13sum_reductionPiS_, 8
.type _Z13max_reductionPiS_,@object # @_Z13max_reductionPiS_
.globl _Z13max_reductionPiS_
.p2align 3, 0x0
_Z13max_reductionPiS_:
.quad _Z28__device_stub__max_reductionPiS_
.size _Z13max_reductionPiS_, 8
.type _Z8variancePiS_Pf,@object # @_Z8variancePiS_Pf
.globl _Z8variancePiS_Pf
.p2align 3, 0x0
_Z8variancePiS_Pf:
.quad _Z23__device_stub__variancePiS_Pf
.size _Z8variancePiS_Pf, 8
.type _Z13min_reductionPiS_,@object # @_Z13min_reductionPiS_
.globl _Z13min_reductionPiS_
.p2align 3, 0x0
_Z13min_reductionPiS_:
.quad _Z28__device_stub__min_reductionPiS_
.size _Z13min_reductionPiS_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "speedup"
.size .L.str, 8
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z13sum_reductionPiS_"
.size .L__unnamed_1, 22
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z13max_reductionPiS_"
.size .L__unnamed_2, 22
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z8variancePiS_Pf"
.size .L__unnamed_3, 18
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "_Z13min_reductionPiS_"
.size .L__unnamed_4, 22
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__sum_reductionPiS_
.addrsig_sym _Z28__device_stub__max_reductionPiS_
.addrsig_sym _Z23__device_stub__variancePiS_Pf
.addrsig_sym _Z28__device_stub__min_reductionPiS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13sum_reductionPiS_
.addrsig_sym _Z13max_reductionPiS_
.addrsig_sym _Z8variancePiS_Pf
.addrsig_sym _Z13min_reductionPiS_
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z13min_reductionPiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R2, R6, c[0x0][0x0], R7 ; /* 0x0000000006027a24 */
/* 0x001fc800078e0207 */
/*0060*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fcc00078e0203 */
/*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000602027981 */
/* 0x000ea2000c1e1900 */
/*0080*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe20000000800 */
/*0090*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fe20003f05270 */
/*00a0*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fcc0008011604 */
/*00b0*/ ISETP.NE.AND P1, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fe2000bf25270 */
/*00c0*/ STS [R7.X4], R2 ; /* 0x0000000207007388 */
/* 0x0041e80000004800 */
/*00d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000ff00000010000 */
/*00e0*/ @!P1 BRA 0x1b0 ; /* 0x000000c000009947 */
/* 0x000fea0003800000 */
/*00f0*/ SHF.L.U32 R0, R7, 0x2, RZ ; /* 0x0000000207007819 */
/* 0x001fe200000006ff */
/*0100*/ IMAD.U32 R3, RZ, RZ, UR4 ; /* 0x00000004ff037e24 */
/* 0x000fca000f8e00ff */
/*0110*/ ISETP.GE.U32.AND P1, PT, R7, R3, PT ; /* 0x000000030700720c */
/* 0x000fda0003f26070 */
/*0120*/ @!P1 LEA R4, R3, R0, 0x2 ; /* 0x0000000003049211 */
/* 0x000fe200078e10ff */
/*0130*/ @!P1 LDS R2, [R7.X4] ; /* 0x0000000007029984 */
/* 0x000fe20000004800 */
/*0140*/ SHF.R.U32.HI R3, RZ, 0x1, R3 ; /* 0x00000001ff037819 */
/* 0x000fc60000011603 */
/*0150*/ @!P1 LDS R5, [R4] ; /* 0x0000000004059984 */
/* 0x000e240000000800 */
/*0160*/ @!P1 IMNMX R2, R2, R5, PT ; /* 0x0000000502029217 */
/* 0x001fca0003800200 */
/*0170*/ @!P1 STS [R7.X4], R2 ; /* 0x0000000207009388 */
/* 0x0001e80000004800 */
/*0180*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0190*/ ISETP.NE.AND P1, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fda0003f25270 */
/*01a0*/ @P1 BRA 0x110 ; /* 0xffffff6000001947 */
/* 0x001fea000383ffff */
/*01b0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x001fea0003800000 */
/*01c0*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */
/* 0x000e220000000800 */
/*01d0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fc800078e00ff */
/*01e0*/ IMAD.WIDE.U32 R2, R6, R3, c[0x0][0x168] ; /* 0x00005a0006027625 */
/* 0x000fca00078e0003 */
/*01f0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101906 */
/*0200*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0210*/ BRA 0x210; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z8variancePiS_Pf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R7, SR_CTAID.X ; /* 0x0000000000077919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R2, R7, c[0x0][0x0], R9 ; /* 0x0000000007027a24 */
/* 0x001fca00078e0209 */
/*0060*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fcc00078e0203 */
/*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000602027981 */
/* 0x000ea2000c1e1900 */
/*0080*/ MOV R5, c[0x0][0x174] ; /* 0x00005d0000057a02 */
/* 0x000fe20000000f00 */
/*0090*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff047624 */
/* 0x000fe400078e00ff */
/*00a0*/ STS [R9.X4], R2 ; /* 0x0000000209007388 */
/* 0x004fe80000004800 */
/*00b0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*00c0*/ LDG.E R5, [R4.64] ; /* 0x0000000604057981 */
/* 0x000ea2000c1e1900 */
/*00d0*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe20000000800 */
/*00e0*/ ISETP.NE.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */
/* 0x000fe20003f05270 */
/*00f0*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fe20008011604 */
/*0100*/ LDS R0, [R9.X4] ; /* 0x0000000009007984 */
/* 0x000e2a0000004800 */
/*0110*/ ISETP.NE.AND P1, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fe2000bf25270 */
/*0120*/ I2F R0, R0 ; /* 0x0000000000007306 */
/* 0x001ea40000201400 */
/*0130*/ FADD R6, R0, -R5 ; /* 0x8000000500067221 */
/* 0x004fc80000000000 */
/*0140*/ FMUL R6, R6, R6 ; /* 0x0000000606067220 */
/* 0x000fcc0000400000 */
/*0150*/ F2I.TRUNC.NTZ R6, R6 ; /* 0x0000000600067305 */
/* 0x000e24000020f100 */
/*0160*/ STS [R9.X4], R6 ; /* 0x0000000609007388 */
/* 0x0011e80000004800 */
/*0170*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0180*/ @!P1 BRA 0x250 ; /* 0x000000c000009947 */
/* 0x000fea0003800000 */
/*0190*/ IMAD.SHL.U32 R0, R9, 0x4, RZ ; /* 0x0000000409007824 */
/* 0x001fe200078e00ff */
/*01a0*/ MOV R3, UR4 ; /* 0x0000000400037c02 */
/* 0x000fc80008000f00 */
/*01b0*/ ISETP.GE.U32.AND P1, PT, R9, R3, PT ; /* 0x000000030900720c */
/* 0x000fda0003f26070 */
/*01c0*/ @!P1 IMAD R2, R3, 0x4, R0 ; /* 0x0000000403029824 */
/* 0x000fe200078e0200 */
/*01d0*/ @!P1 LDS R4, [R9.X4] ; /* 0x0000000009049984 */
/* 0x000fe20000004800 */
/*01e0*/ SHF.R.U32.HI R3, RZ, 0x1, R3 ; /* 0x00000001ff037819 */
/* 0x000fc60000011603 */
/*01f0*/ @!P1 LDS R5, [R2] ; /* 0x0000000002059984 */
/* 0x000e240000000800 */
/*0200*/ @!P1 IADD3 R4, R4, R5, RZ ; /* 0x0000000504049210 */
/* 0x001fca0007ffe0ff */
/*0210*/ @!P1 STS [R9.X4], R4 ; /* 0x0000000409009388 */
/* 0x0001e80000004800 */
/*0220*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0230*/ ISETP.NE.AND P1, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fda0003f25270 */
/*0240*/ @P1 BRA 0x1b0 ; /* 0xffffff6000001947 */
/* 0x001fea000383ffff */
/*0250*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x001fea0003800000 */
/*0260*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */
/* 0x000e220000000800 */
/*0270*/ IMAD.MOV.U32 R2, RZ, RZ, 0x4 ; /* 0x00000004ff027424 */
/* 0x000fc800078e00ff */
/*0280*/ IMAD.WIDE.U32 R2, R7, R2, c[0x0][0x168] ; /* 0x00005a0007027625 */
/* 0x000fca00078e0002 */
/*0290*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101906 */
/*02a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*02b0*/ BRA 0x2b0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0300*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0310*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0320*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0330*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0340*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0350*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z13max_reductionPiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R2, R6, c[0x0][0x0], R7 ; /* 0x0000000006027a24 */
/* 0x001fc800078e0207 */
/*0060*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fcc00078e0203 */
/*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000602027981 */
/* 0x000ea2000c1e1900 */
/*0080*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe20000000800 */
/*0090*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fe20003f05270 */
/*00a0*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fcc0008011604 */
/*00b0*/ ISETP.NE.AND P1, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fe2000bf25270 */
/*00c0*/ STS [R7.X4], R2 ; /* 0x0000000207007388 */
/* 0x0041e80000004800 */
/*00d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000ff00000010000 */
/*00e0*/ @!P1 BRA 0x1b0 ; /* 0x000000c000009947 */
/* 0x000fea0003800000 */
/*00f0*/ SHF.L.U32 R0, R7, 0x2, RZ ; /* 0x0000000207007819 */
/* 0x001fe200000006ff */
/*0100*/ IMAD.U32 R3, RZ, RZ, UR4 ; /* 0x00000004ff037e24 */
/* 0x000fca000f8e00ff */
/*0110*/ ISETP.GE.U32.AND P1, PT, R7, R3, PT ; /* 0x000000030700720c */
/* 0x000fda0003f26070 */
/*0120*/ @!P1 LEA R4, R3, R0, 0x2 ; /* 0x0000000003049211 */
/* 0x000fe200078e10ff */
/*0130*/ @!P1 LDS R2, [R7.X4] ; /* 0x0000000007029984 */
/* 0x000fe20000004800 */
/*0140*/ SHF.R.U32.HI R3, RZ, 0x1, R3 ; /* 0x00000001ff037819 */
/* 0x000fc60000011603 */
/*0150*/ @!P1 LDS R5, [R4] ; /* 0x0000000004059984 */
/* 0x000e240000000800 */
/*0160*/ @!P1 IMNMX R2, R2, R5, !PT ; /* 0x0000000502029217 */
/* 0x001fca0007800200 */
/*0170*/ @!P1 STS [R7.X4], R2 ; /* 0x0000000207009388 */
/* 0x0001e80000004800 */
/*0180*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0190*/ ISETP.NE.AND P1, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fda0003f25270 */
/*01a0*/ @P1 BRA 0x110 ; /* 0xffffff6000001947 */
/* 0x001fea000383ffff */
/*01b0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x001fea0003800000 */
/*01c0*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */
/* 0x000e220000000800 */
/*01d0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fc800078e00ff */
/*01e0*/ IMAD.WIDE.U32 R2, R6, R3, c[0x0][0x168] ; /* 0x00005a0006027625 */
/* 0x000fca00078e0003 */
/*01f0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101906 */
/*0200*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0210*/ BRA 0x210; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z13sum_reductionPiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R2, R6, c[0x0][0x0], R7 ; /* 0x0000000006027a24 */
/* 0x001fca00078e0207 */
/*0060*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fcc00078e0203 */
/*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000602027981 */
/* 0x000ea2000c1e1900 */
/*0080*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe20000000800 */
/*0090*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fe20003f05270 */
/*00a0*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fcc0008011604 */
/*00b0*/ ISETP.NE.AND P1, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fe2000bf25270 */
/*00c0*/ STS [R7.X4], R2 ; /* 0x0000000207007388 */
/* 0x0041e80000004800 */
/*00d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000ff00000010000 */
/*00e0*/ @!P1 BRA 0x1b0 ; /* 0x000000c000009947 */
/* 0x000fea0003800000 */
/*00f0*/ IMAD.SHL.U32 R0, R7, 0x4, RZ ; /* 0x0000000407007824 */
/* 0x001fe200078e00ff */
/*0100*/ MOV R3, UR4 ; /* 0x0000000400037c02 */
/* 0x000fc80008000f00 */
/*0110*/ ISETP.GE.U32.AND P1, PT, R7, R3, PT ; /* 0x000000030700720c */
/* 0x000fda0003f26070 */
/*0120*/ @!P1 IMAD R2, R3, 0x4, R0 ; /* 0x0000000403029824 */
/* 0x000fe200078e0200 */
/*0130*/ @!P1 LDS R4, [R7.X4] ; /* 0x0000000007049984 */
/* 0x000fe20000004800 */
/*0140*/ SHF.R.U32.HI R3, RZ, 0x1, R3 ; /* 0x00000001ff037819 */
/* 0x000fc60000011603 */
/*0150*/ @!P1 LDS R5, [R2] ; /* 0x0000000002059984 */
/* 0x000e240000000800 */
/*0160*/ @!P1 IADD3 R4, R4, R5, RZ ; /* 0x0000000504049210 */
/* 0x001fca0007ffe0ff */
/*0170*/ @!P1 STS [R7.X4], R4 ; /* 0x0000000407009388 */
/* 0x0001e80000004800 */
/*0180*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0190*/ ISETP.NE.AND P1, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fda0003f25270 */
/*01a0*/ @P1 BRA 0x110 ; /* 0xffffff6000001947 */
/* 0x001fea000383ffff */
/*01b0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x001fea0003800000 */
/*01c0*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */
/* 0x000e220000000800 */
/*01d0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fc800078e00ff */
/*01e0*/ IMAD.WIDE.U32 R2, R6, R3, c[0x0][0x168] ; /* 0x00005a0006027625 */
/* 0x000fca00078e0003 */
/*01f0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101906 */
/*0200*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0210*/ BRA 0x210; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13sum_reductionPiS_
.globl _Z13sum_reductionPiS_
.p2align 8
.type _Z13sum_reductionPiS_,@function
_Z13sum_reductionPiS_:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x1c
s_load_b64 s[4:5], s[0:1], 0x0
s_mov_b32 s2, s15
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s2, s3, v[0:1]
s_cmp_lt_u32 s3, 2
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
v_add_co_u32 v1, vcc_lo, s4, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s5, v2, vcc_lo
global_load_b32 v2, v[1:2], off
v_lshlrev_b32_e32 v1, 2, v0
s_waitcnt vmcnt(0)
ds_store_b32 v1, v2
s_waitcnt lgkmcnt(0)
s_barrier
s_branch .LBB0_2
.p2align 6
.LBB0_1:
s_or_b32 exec_lo, exec_lo, s5
s_waitcnt lgkmcnt(0)
s_barrier
s_cmp_lt_u32 s3, 4
s_mov_b32 s3, s4
.LBB0_2:
buffer_gl0_inv
s_cbranch_scc1 .LBB0_5
s_lshr_b32 s4, s3, 1
s_mov_b32 s5, exec_lo
v_cmpx_gt_u32_e64 s4, v0
s_cbranch_execz .LBB0_1
v_add_lshl_u32 v2, s4, v0, 2
ds_load_b32 v2, v2
ds_load_b32 v3, v1
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
ds_store_b32 v1, v2
s_branch .LBB0_1
.LBB0_5:
s_mov_b32 s3, 0
s_mov_b32 s4, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB0_7
v_mov_b32_e32 v0, 0
s_load_b64 s[0:1], s[0:1], 0x8
s_lshl_b64 s[2:3], s[2:3], 2
ds_load_b32 v1, v0
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v0, v1, s[0:1]
.LBB0_7:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13sum_reductionPiS_
.amdhsa_group_segment_fixed_size 4096
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13sum_reductionPiS_, .Lfunc_end0-_Z13sum_reductionPiS_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z13max_reductionPiS_
.globl _Z13max_reductionPiS_
.p2align 8
.type _Z13max_reductionPiS_,@function
_Z13max_reductionPiS_:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x1c
s_load_b64 s[4:5], s[0:1], 0x0
s_mov_b32 s2, s15
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s2, s3, v[0:1]
s_cmp_lt_u32 s3, 2
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
v_add_co_u32 v1, vcc_lo, s4, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s5, v2, vcc_lo
global_load_b32 v2, v[1:2], off
v_lshlrev_b32_e32 v1, 2, v0
s_waitcnt vmcnt(0)
ds_store_b32 v1, v2
s_waitcnt lgkmcnt(0)
s_barrier
s_branch .LBB1_2
.p2align 6
.LBB1_1:
s_or_b32 exec_lo, exec_lo, s5
s_waitcnt lgkmcnt(0)
s_barrier
s_cmp_lt_u32 s3, 4
s_mov_b32 s3, s4
.LBB1_2:
buffer_gl0_inv
s_cbranch_scc1 .LBB1_5
s_lshr_b32 s4, s3, 1
s_mov_b32 s5, exec_lo
v_cmpx_gt_u32_e64 s4, v0
s_cbranch_execz .LBB1_1
v_add_lshl_u32 v2, s4, v0, 2
ds_load_b32 v3, v1
ds_load_b32 v2, v2
s_waitcnt lgkmcnt(0)
v_max_i32_e32 v2, v3, v2
ds_store_b32 v1, v2
s_branch .LBB1_1
.LBB1_5:
s_mov_b32 s3, 0
s_mov_b32 s4, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB1_7
v_mov_b32_e32 v0, 0
s_load_b64 s[0:1], s[0:1], 0x8
s_lshl_b64 s[2:3], s[2:3], 2
ds_load_b32 v1, v0
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v0, v1, s[0:1]
.LBB1_7:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13max_reductionPiS_
.amdhsa_group_segment_fixed_size 4096
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z13max_reductionPiS_, .Lfunc_end1-_Z13max_reductionPiS_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z8variancePiS_Pf
.globl _Z8variancePiS_Pf
.p2align 8
.type _Z8variancePiS_Pf,@function
_Z8variancePiS_Pf:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x24
s_load_b64 s[6:7], s[0:1], 0x10
s_mov_b32 s2, s15
s_load_b64 s[4:5], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s2, s3, v[0:1]
s_cmp_lt_u32 s3, 2
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
v_add_co_u32 v1, vcc_lo, s4, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s5, v2, vcc_lo
global_load_b32 v2, v[1:2], off
v_lshlrev_b32_e32 v1, 2, v0
s_waitcnt vmcnt(0)
ds_store_b32 v1, v2
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
ds_load_b32 v2, v1
s_load_b32 s4, s[6:7], 0x0
s_waitcnt lgkmcnt(0)
v_cvt_f32_i32_e32 v2, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_subrev_f32_e32 v2, s4, v2
v_mul_f32_e32 v2, v2, v2
s_delay_alu instid0(VALU_DEP_1)
v_cvt_i32_f32_e32 v2, v2
ds_store_b32 v1, v2
s_waitcnt lgkmcnt(0)
s_barrier
s_branch .LBB2_2
.p2align 6
.LBB2_1:
s_or_b32 exec_lo, exec_lo, s5
s_waitcnt lgkmcnt(0)
s_barrier
s_cmp_lt_u32 s3, 4
s_mov_b32 s3, s4
.LBB2_2:
buffer_gl0_inv
s_cbranch_scc1 .LBB2_5
s_lshr_b32 s4, s3, 1
s_mov_b32 s5, exec_lo
v_cmpx_gt_u32_e64 s4, v0
s_cbranch_execz .LBB2_1
v_add_lshl_u32 v2, s4, v0, 2
ds_load_b32 v2, v2
ds_load_b32 v3, v1
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
ds_store_b32 v1, v2
s_branch .LBB2_1
.LBB2_5:
s_mov_b32 s3, 0
s_mov_b32 s4, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB2_7
v_mov_b32_e32 v0, 0
s_load_b64 s[0:1], s[0:1], 0x8
s_lshl_b64 s[2:3], s[2:3], 2
ds_load_b32 v1, v0
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v0, v1, s[0:1]
.LBB2_7:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8variancePiS_Pf
.amdhsa_group_segment_fixed_size 4096
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z8variancePiS_Pf, .Lfunc_end2-_Z8variancePiS_Pf
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z13min_reductionPiS_
.globl _Z13min_reductionPiS_
.p2align 8
.type _Z13min_reductionPiS_,@function
_Z13min_reductionPiS_:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x1c
s_load_b64 s[4:5], s[0:1], 0x0
s_mov_b32 s2, s15
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s2, s3, v[0:1]
s_cmp_lt_u32 s3, 2
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
v_add_co_u32 v1, vcc_lo, s4, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s5, v2, vcc_lo
global_load_b32 v2, v[1:2], off
v_lshlrev_b32_e32 v1, 2, v0
s_waitcnt vmcnt(0)
ds_store_b32 v1, v2
s_waitcnt lgkmcnt(0)
s_barrier
s_branch .LBB3_2
.p2align 6
.LBB3_1:
s_or_b32 exec_lo, exec_lo, s5
s_waitcnt lgkmcnt(0)
s_barrier
s_cmp_lt_u32 s3, 4
s_mov_b32 s3, s4
.LBB3_2:
buffer_gl0_inv
s_cbranch_scc1 .LBB3_5
s_lshr_b32 s4, s3, 1
s_mov_b32 s5, exec_lo
v_cmpx_gt_u32_e64 s4, v0
s_cbranch_execz .LBB3_1
v_add_lshl_u32 v2, s4, v0, 2
ds_load_b32 v3, v1
ds_load_b32 v2, v2
s_waitcnt lgkmcnt(0)
v_min_i32_e32 v2, v3, v2
ds_store_b32 v1, v2
s_branch .LBB3_1
.LBB3_5:
s_mov_b32 s3, 0
s_mov_b32 s4, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB3_7
v_mov_b32_e32 v0, 0
s_load_b64 s[0:1], s[0:1], 0x8
s_lshl_b64 s[2:3], s[2:3], 2
ds_load_b32 v1, v0
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v0, v1, s[0:1]
.LBB3_7:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13min_reductionPiS_
.amdhsa_group_segment_fixed_size 4096
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end3:
.size _Z13min_reductionPiS_, .Lfunc_end3-_Z13min_reductionPiS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 4096
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13sum_reductionPiS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13sum_reductionPiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 4096
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13max_reductionPiS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13max_reductionPiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 4096
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8variancePiS_Pf
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z8variancePiS_Pf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 4096
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13min_reductionPiS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13min_reductionPiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00030b20_00000000-6_reduction.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB10863:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE10863:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z12inititialisePii
.type _Z12inititialisePii, @function
_Z12inititialisePii:
.LFB10859:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L8
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbx
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %rbp
.L5:
call rand@PLT
movslq %eax, %rdx
imulq $274877907, %rdx, %rdx
sarq $38, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
imull $1000, %edx, %edx
subl %edx, %eax
movl %eax, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L5
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L8:
.cfi_restore 3
.cfi_restore 6
ret
.cfi_endproc
.LFE10859:
.size _Z12inititialisePii, .-_Z12inititialisePii
.globl _Z35__device_stub__Z13sum_reductionPiS_PiS_
.type _Z35__device_stub__Z13sum_reductionPiS_PiS_, @function
_Z35__device_stub__Z13sum_reductionPiS_PiS_:
.LFB10885:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z13sum_reductionPiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE10885:
.size _Z35__device_stub__Z13sum_reductionPiS_PiS_, .-_Z35__device_stub__Z13sum_reductionPiS_PiS_
.globl _Z13sum_reductionPiS_
.type _Z13sum_reductionPiS_, @function
_Z13sum_reductionPiS_:
.LFB10886:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z13sum_reductionPiS_PiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE10886:
.size _Z13sum_reductionPiS_, .-_Z13sum_reductionPiS_
.globl _Z35__device_stub__Z13max_reductionPiS_PiS_
.type _Z35__device_stub__Z13max_reductionPiS_PiS_, @function
_Z35__device_stub__Z13max_reductionPiS_PiS_:
.LFB10887:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z13max_reductionPiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE10887:
.size _Z35__device_stub__Z13max_reductionPiS_PiS_, .-_Z35__device_stub__Z13max_reductionPiS_PiS_
.globl _Z13max_reductionPiS_
.type _Z13max_reductionPiS_, @function
_Z13max_reductionPiS_:
.LFB10888:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z13max_reductionPiS_PiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE10888:
.size _Z13max_reductionPiS_, .-_Z13max_reductionPiS_
.globl _Z31__device_stub__Z8variancePiS_PfPiS_Pf
.type _Z31__device_stub__Z8variancePiS_PfPiS_Pf, @function
_Z31__device_stub__Z8variancePiS_PfPiS_Pf:
.LFB10889:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z8variancePiS_Pf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE10889:
.size _Z31__device_stub__Z8variancePiS_PfPiS_Pf, .-_Z31__device_stub__Z8variancePiS_PfPiS_Pf
.globl _Z8variancePiS_Pf
.type _Z8variancePiS_Pf, @function
_Z8variancePiS_Pf:
.LFB10890:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z8variancePiS_PfPiS_Pf
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE10890:
.size _Z8variancePiS_Pf, .-_Z8variancePiS_Pf
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string "speedup"
.text
.globl main
.type main, @function
main:
.LFB10860:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $96, %rsp
.cfi_def_cfa_offset 144
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
movl $262144, %edi
call malloc@PLT
movq %rax, %r12
leaq 24(%rsp), %rdi
movl $262144, %esi
call cudaMalloc@PLT
movl $1024, %edi
call malloc@PLT
movq %rax, %r13
leaq 32(%rsp), %rdi
movl $1024, %esi
call cudaMalloc@PLT
leaq 40(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movl $65536, %esi
movq %r12, %rdi
call _Z12inititialisePii
movq %r12, %rbx
leaq 262144(%r12), %rbp
movq %r12, %rax
movl $0, %ecx
.L36:
addl (%rax), %ecx
addq $4, %rax
cmpq %rbp, %rax
jne .L36
leal 65535(%rcx), %eax
testl %ecx, %ecx
cmovns %ecx, %eax
sarl $16, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, 20(%rsp)
call clock@PLT
movq %rax, %r14
movss 20(%rsp), %xmm2
movl $0, %esi
.L37:
pxor %xmm0, %xmm0
cvtsi2ssl (%rbx), %xmm0
subss %xmm2, %xmm0
pxor %xmm1, %xmm1
cvtsi2ssl %esi, %xmm1
mulss %xmm0, %xmm0
addss %xmm1, %xmm0
cvttss2sil %xmm0, %esi
addq $4, %rbx
cmpq %rbp, %rbx
jne .L37
leaq _ZSt4cout(%rip), %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
call clock@PLT
pxor %xmm0, %xmm0
cvtsi2ssq %rax, %xmm0
pxor %xmm1, %xmm1
cvtsi2ssq %r14, %xmm1
subss %xmm1, %xmm0
divss .LC0(%rip), %xmm0
mulss .LC1(%rip), %xmm0
movss %xmm0, 12(%rsp)
leaq 48(%rsp), %rdi
call cudaEventCreate@PLT
leaq 56(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 48(%rsp), %rdi
call cudaEventRecord@PLT
movl $1, %ecx
movl $262144, %edx
movq %r12, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
leaq 20(%rsp), %rsi
movl $1, %ecx
movl $4, %edx
movq 40(%rsp), %rdi
call cudaMemcpy@PLT
movl $256, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $256, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 76(%rsp), %rdx
movl $1, %ecx
movq 64(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L44
.L38:
movl $256, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 76(%rsp), %rdx
movl $1, %ecx
movq 64(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L45
.L39:
movl $2, %ecx
movl $1024, %edx
movq 32(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movl 0(%r13), %esi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $0, %esi
movq 56(%rsp), %rdi
call cudaEventRecord@PLT
movq 56(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 76(%rsp), %rdi
movq 56(%rsp), %rdx
movq 48(%rsp), %rsi
call cudaEventElapsedTime@PLT
movq 48(%rsp), %rdi
call cudaEventDestroy@PLT
movq 56(%rsp), %rdi
call cudaEventDestroy@PLT
pxor %xmm0, %xmm0
cvtss2sd 12(%rsp), %xmm0
movq %rbx, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
pxor %xmm0, %xmm0
cvtss2sd 76(%rsp), %xmm0
movq %rbx, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC2(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movss 12(%rsp), %xmm0
divss 76(%rsp), %xmm0
cvtss2sd %xmm0, %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L46
movl $0, %eax
addq $96, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L44:
.cfi_restore_state
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z31__device_stub__Z8variancePiS_PfPiS_Pf
jmp .L38
.L45:
movq 32(%rsp), %rdi
movq %rdi, %rsi
call _Z35__device_stub__Z13sum_reductionPiS_PiS_
jmp .L39
.L46:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE10860:
.size main, .-main
.globl _Z35__device_stub__Z13min_reductionPiS_PiS_
.type _Z35__device_stub__Z13min_reductionPiS_PiS_, @function
_Z35__device_stub__Z13min_reductionPiS_PiS_:
.LFB10891:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L51
.L47:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L52
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L51:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z13min_reductionPiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L47
.L52:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE10891:
.size _Z35__device_stub__Z13min_reductionPiS_PiS_, .-_Z35__device_stub__Z13min_reductionPiS_PiS_
.globl _Z13min_reductionPiS_
.type _Z13min_reductionPiS_, @function
_Z13min_reductionPiS_:
.LFB10892:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z13min_reductionPiS_PiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE10892:
.size _Z13min_reductionPiS_, .-_Z13min_reductionPiS_
.section .rodata.str1.1
.LC3:
.string "_Z13min_reductionPiS_"
.LC4:
.string "_Z8variancePiS_Pf"
.LC5:
.string "_Z13max_reductionPiS_"
.LC6:
.string "_Z13sum_reductionPiS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB10894:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z13min_reductionPiS_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z8variancePiS_Pf(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z13max_reductionPiS_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z13sum_reductionPiS_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE10894:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1232348160
.align 4
.LC1:
.long 1148846080
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "reduction.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z28__device_stub__sum_reductionPiS_ # -- Begin function _Z28__device_stub__sum_reductionPiS_
.p2align 4, 0x90
.type _Z28__device_stub__sum_reductionPiS_,@function
_Z28__device_stub__sum_reductionPiS_: # @_Z28__device_stub__sum_reductionPiS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z13sum_reductionPiS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z28__device_stub__sum_reductionPiS_, .Lfunc_end0-_Z28__device_stub__sum_reductionPiS_
.cfi_endproc
# -- End function
.globl _Z28__device_stub__max_reductionPiS_ # -- Begin function _Z28__device_stub__max_reductionPiS_
.p2align 4, 0x90
.type _Z28__device_stub__max_reductionPiS_,@function
_Z28__device_stub__max_reductionPiS_: # @_Z28__device_stub__max_reductionPiS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z13max_reductionPiS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end1:
.size _Z28__device_stub__max_reductionPiS_, .Lfunc_end1-_Z28__device_stub__max_reductionPiS_
.cfi_endproc
# -- End function
.globl _Z23__device_stub__variancePiS_Pf # -- Begin function _Z23__device_stub__variancePiS_Pf
.p2align 4, 0x90
.type _Z23__device_stub__variancePiS_Pf,@function
_Z23__device_stub__variancePiS_Pf: # @_Z23__device_stub__variancePiS_Pf
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8variancePiS_Pf, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end2:
.size _Z23__device_stub__variancePiS_Pf, .Lfunc_end2-_Z23__device_stub__variancePiS_Pf
.cfi_endproc
# -- End function
.globl _Z28__device_stub__min_reductionPiS_ # -- Begin function _Z28__device_stub__min_reductionPiS_
.p2align 4, 0x90
.type _Z28__device_stub__min_reductionPiS_,@function
_Z28__device_stub__min_reductionPiS_: # @_Z28__device_stub__min_reductionPiS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z13min_reductionPiS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end3:
.size _Z28__device_stub__min_reductionPiS_, .Lfunc_end3-_Z28__device_stub__min_reductionPiS_
.cfi_endproc
# -- End function
.globl _Z12inititialisePii # -- Begin function _Z12inititialisePii
.p2align 4, 0x90
.type _Z12inititialisePii,@function
_Z12inititialisePii: # @_Z12inititialisePii
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB4_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl %esi, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB4_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $274877907, %rax, %rcx # imm = 0x10624DD3
movq %rcx, %rdx
shrq $63, %rdx
sarq $38, %rcx
addl %edx, %ecx
imull $1000, %ecx, %ecx # imm = 0x3E8
subl %ecx, %eax
movl %eax, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB4_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB4_4: # %._crit_edge
retq
.Lfunc_end4:
.size _Z12inititialisePii, .Lfunc_end4-_Z12inititialisePii
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI5_0:
.long 0x49742400 # float 1.0E+6
.LCPI5_1:
.long 0x447a0000 # float 1000
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $144, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $262144, %edi # imm = 0x40000
callq malloc
movq %rax, %r15
leaq 128(%rsp), %rdi
movl $262144, %esi # imm = 0x40000
callq hipMalloc
movl $1024, %edi # imm = 0x400
callq malloc
movq %rax, %rbx
leaq 24(%rsp), %rdi
movl $1024, %esi # imm = 0x400
callq hipMalloc
leaq 120(%rsp), %rdi
movl $4, %esi
callq hipMalloc
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB5_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $274877907, %rax, %rcx # imm = 0x10624DD3
movq %rcx, %rdx
shrq $63, %rdx
sarq $38, %rcx
addl %edx, %ecx
imull $1000, %ecx, %ecx # imm = 0x3E8
subl %ecx, %eax
movl %eax, (%r15,%r14,4)
incq %r14
cmpq $65536, %r14 # imm = 0x10000
jne .LBB5_1
# %bb.2: # %_Z12inititialisePii.exit.preheader
xorl %ecx, %ecx
xorl %eax, %eax
.p2align 4, 0x90
.LBB5_3: # %_Z12inititialisePii.exit
# =>This Inner Loop Header: Depth=1
addl (%r15,%rcx,4), %eax
incq %rcx
cmpq $65536, %rcx # imm = 0x10000
jne .LBB5_3
# %bb.4:
leal 65535(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $16, %ecx
cvtsi2ss %ecx, %xmm0
movss %xmm0, 4(%rsp)
callq clock
movq %rax, %r14
xorl %eax, %eax
movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
xorl %esi, %esi
.p2align 4, 0x90
.LBB5_5: # =>This Inner Loop Header: Depth=1
xorps %xmm1, %xmm1
cvtsi2ss %esi, %xmm1
xorps %xmm2, %xmm2
cvtsi2ssl (%r15,%rax,4), %xmm2
subss %xmm0, %xmm2
mulss %xmm2, %xmm2
addss %xmm1, %xmm2
cvttss2si %xmm2, %esi
incq %rax
cmpq $65536, %rax # imm = 0x10000
jne .LBB5_5
# %bb.6:
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r12
testq %r12, %r12
je .LBB5_31
# %bb.7: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%r12)
je .LBB5_9
# %bb.8:
movzbl 67(%r12), %ecx
jmp .LBB5_10
.LBB5_9:
movq %r12, %rdi
movq %rax, %r13
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r13, %rax
.LBB5_10: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movabsq $4294967552, %r13 # imm = 0x100000100
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
callq clock
movq %rax, %r12
leaq 16(%rsp), %rdi
callq hipEventCreate
leaq 8(%rsp), %rdi
callq hipEventCreate
movq 16(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 128(%rsp), %rdi
movl $262144, %edx # imm = 0x40000
movq %r15, %rsi
movl $1, %ecx
callq hipMemcpy
movq 120(%rsp), %rdi
leaq 4(%rsp), %rsi
movl $4, %edx
movl $1, %ecx
callq hipMemcpy
movq %r13, %rdi
movl $1, %esi
movq %r13, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_12
# %bb.11:
movq 128(%rsp), %rax
movq 24(%rsp), %rcx
movq 120(%rsp), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 40(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 136(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z8variancePiS_Pf, %edi
pushq 136(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_12:
leaq -255(%r13), %rdi
movl $1, %esi
movq %r13, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_14
# %bb.13:
movq 24(%rsp), %rax
movq %rax, 88(%rsp)
movq %rax, 80(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z13sum_reductionPiS_, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_14:
movq 24(%rsp), %rsi
movl $1024, %edx # imm = 0x400
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movl (%rbx), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB5_31
# %bb.15: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i53
cvtsi2ss %r12, %xmm1
cvtsi2ss %r14, %xmm0
subss %xmm0, %xmm1
divss .LCPI5_0(%rip), %xmm1
mulss .LCPI5_1(%rip), %xmm1
movss %xmm1, (%rsp) # 4-byte Spill
cmpb $0, 56(%rbx)
je .LBB5_17
# %bb.16:
movzbl 67(%rbx), %ecx
jmp .LBB5_18
.LBB5_17:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB5_18: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit56
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 16(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 96(%rsp), %rdi
callq hipEventElapsedTime
movq 16(%rsp), %rdi
callq hipEventDestroy
movq 8(%rsp), %rdi
callq hipEventDestroy
movss (%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB5_31
# %bb.19: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i58
cmpb $0, 56(%rbx)
je .LBB5_21
# %bb.20:
movzbl 67(%rbx), %ecx
jmp .LBB5_22
.LBB5_21:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB5_22: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit61
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movss 96(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB5_31
# %bb.23: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i63
cmpb $0, 56(%rbx)
je .LBB5_25
# %bb.24:
movzbl 67(%rbx), %ecx
jmp .LBB5_26
.LBB5_25:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB5_26: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit66
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $7, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movss (%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
divss 96(%rsp), %xmm0
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB5_31
# %bb.27: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i68
cmpb $0, 56(%rbx)
je .LBB5_29
# %bb.28:
movzbl 67(%rbx), %ecx
jmp .LBB5_30
.LBB5_29:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB5_30: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit71
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
xorl %eax, %eax
addq $144, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB5_31:
.cfi_def_cfa_offset 192
callq _ZSt16__throw_bad_castv
.Lfunc_end5:
.size main, .Lfunc_end5-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB6_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13sum_reductionPiS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13max_reductionPiS_, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8variancePiS_Pf, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13min_reductionPiS_, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end6:
.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB7_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB7_2:
retq
.Lfunc_end7:
.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z13sum_reductionPiS_,@object # @_Z13sum_reductionPiS_
.section .rodata,"a",@progbits
.globl _Z13sum_reductionPiS_
.p2align 3, 0x0
_Z13sum_reductionPiS_:
.quad _Z28__device_stub__sum_reductionPiS_
.size _Z13sum_reductionPiS_, 8
.type _Z13max_reductionPiS_,@object # @_Z13max_reductionPiS_
.globl _Z13max_reductionPiS_
.p2align 3, 0x0
_Z13max_reductionPiS_:
.quad _Z28__device_stub__max_reductionPiS_
.size _Z13max_reductionPiS_, 8
.type _Z8variancePiS_Pf,@object # @_Z8variancePiS_Pf
.globl _Z8variancePiS_Pf
.p2align 3, 0x0
_Z8variancePiS_Pf:
.quad _Z23__device_stub__variancePiS_Pf
.size _Z8variancePiS_Pf, 8
.type _Z13min_reductionPiS_,@object # @_Z13min_reductionPiS_
.globl _Z13min_reductionPiS_
.p2align 3, 0x0
_Z13min_reductionPiS_:
.quad _Z28__device_stub__min_reductionPiS_
.size _Z13min_reductionPiS_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "speedup"
.size .L.str, 8
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z13sum_reductionPiS_"
.size .L__unnamed_1, 22
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z13max_reductionPiS_"
.size .L__unnamed_2, 22
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z8variancePiS_Pf"
.size .L__unnamed_3, 18
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "_Z13min_reductionPiS_"
.size .L__unnamed_4, 22
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__sum_reductionPiS_
.addrsig_sym _Z28__device_stub__max_reductionPiS_
.addrsig_sym _Z23__device_stub__variancePiS_Pf
.addrsig_sym _Z28__device_stub__min_reductionPiS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13sum_reductionPiS_
.addrsig_sym _Z13max_reductionPiS_
.addrsig_sym _Z8variancePiS_Pf
.addrsig_sym _Z13min_reductionPiS_
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /**
https://blog.csdn.net/Bruce_0712/article/details/64928442
cudaDeviceSynchronize():该方法将停止CPU端线程的执行,直到GPU端完成之前CUDA的任务,包括kernel函数、数据拷贝等。
cudaThreadSynchronize():该方法的作用和cudaDeviceSynchronize()基本相同,但它不是一个被推荐的方法,也许在后期版本的CUDA中会被删除。
cudaStreamSynchronize():这个方法接受一个stream ID,它将阻止CPU执行直到GPU端完成相应stream ID的所有CUDA任务,但其它stream中的CUDA任务可能执行完也可能没有执行完。
跨warp进行同步,您需要使用 __ syncthreads()
(1)在同一个warp内的线程读写shared/global 不需同步,
读写global和shared是立刻对本warp内的其他线程立刻可见的。
(2)在同一个block内的不同warp内线程读写shared/global 需同步,
这种读写必须使用__syncthreads(), 或者__threadfence()来实现不同的读写可见效果。
(3)在同一个grid内的不同block内的线程读写shared/gloabl 需要同步
这种读写必须使用__threadfence*()来实现一定的读写可见效果。
// 执行结果
[vec_add.cu:59] GPU 实现向量的加法
allocated 76.29 MB on GPU
GPU cost time=0.109505s last_num=4e+07
CPU cost time=2.46585s last_num=4e+07
*/
#include <iostream>
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <ctime>
#define mycout cout<<"["<<__FILE__<<":"<<__LINE__<<"] "
#define use_warmup 1
using namespace std;
typedef float FLOAT;
// GPU预热(可以提升GPU计算速度)
void warmup();
// CPU 向量加法
// __host__ void vec_add_host(FLOAT *x,FLOAT* y,FLOAT *z,int N);
// or host函数__host__可以省略
void vec_add_host(FLOAT *x,FLOAT *y,FLOAT *z,int N);
// GPU 函数
__global__ void vec_add(FLOAT *x,FLOAT *y,FLOAT *z,int N)
{
// 获取线程id(同时有很多个线程在执行这个函数,通过线程id区分)
/**
* <<<(256,256),256>>> grid 2维 block 1维 tid=threadIdx.x+blockDim.x*blockIdx.x+blockDim.x*gridDim.x*blockIdx.y
* <<<256,256>>> grid 1维 block 1维 tid=threadIdx.x+blockDim.x*blockIdx.x
* <<<1,256>>> grid 1维 block 1维 tid=threadIdx.x
* <<<256,1>>> grid 1维 block 1维 tid=blockDim.x*blockIdx.x
*/
int tid=threadIdx.x+blockDim.x*blockIdx.x+blockDim.x*gridDim.x*blockIdx.y;
if (tid<N) z[tid]=x[tid]+y[tid]; // 开的线程数必须大于数据总数,保证每个数据都能参与计算
// __syncthreads(); // 线程同步
}
int main(int argc, char *argv[])
{
mycout<<"GPU 实现向量的加法"<<endl;
int N = 20000000;
int nbytes = N * sizeof(FLOAT);
/* 1D block */
int bs = 256;
/* 2D grid */
// int s = ceil(sqrt((N + bs - 1.) / bs));
int s = ceil(sqrt(1.0*N / bs));
dim3 grid = dim3(s, s);
// dx 表示gpu变量,hx表示cpu变量
FLOAT *dx = NULL, *hx = NULL;
FLOAT *dy = NULL, *hy = NULL;
FLOAT *dz = NULL, *hz = NULL;
int itr = 30;
int i;
// double th, td;
/**======1、CPU 创建变量赋值==========*/
/* alllocate CPU mem */
hx = (FLOAT *) malloc(nbytes);
hy = (FLOAT *) malloc(nbytes);
hz = (FLOAT *) malloc(nbytes);
if (hx == NULL || hy == NULL || hz == NULL) {
// printf("couldn't allocate CPU memory\n");
mycout<<"couldn't allocate CPU memory"<<endl;
return -2;
}
/*给CPU变量赋值*/
// fill the arrays 'hx' and 'hy' on the CPU
for (int i=0; i<N; i++) {
hx[i] = i;
hy[i] = i ;
}
/* warm up GPU */
#if use_warmup
warmup(); // 预热
#endif // use_warmup
/**======2、GPU 分配内存======*/
/* allocate GPU mem */
cudaMalloc((void **)&dx, nbytes);
cudaMalloc((void **)&dy, nbytes);
cudaMalloc((void **)&dz, nbytes);
if (dx == NULL || dy == NULL || dz == NULL) {
// printf("couldn't allocate GPU memory\n");
mycout<<"couldn't allocate GPU memory"<<endl;
return -1;
}
printf("allocated %.2f MB on GPU\n", nbytes / (1024.f * 1024.f));
/**======3、将CPU数据拷贝给GPU======*/
/** copy data to GPU */
cudaMemcpy(dx, hx, nbytes, cudaMemcpyHostToDevice);
cudaMemcpy(dy, hy, nbytes, cudaMemcpyHostToDevice);
/* warm up GPU */
#if use_warmup
warmup(); // 预热
#endif // use_warmup
/**======4、调用GPU计算======*/
/* call GPU */
// cudaThreadSynchronize(); // 线程同步,等到前面的GPU数据拷贝完成
cudaDeviceSynchronize(); // cudaThreadSynchronize() 弃用了
clock_t start = clock();
for (i = 0; i < itr; i++) vec_add<<<grid, bs>>>(dx, dy, dz, N);
// cudaThreadSynchronize(); // 线程同步 等待所有线程处理完成
cudaDeviceSynchronize(); // cudaThreadSynchronize() 弃用了
/**======5、GPU计算结果拷贝给CPU======*/
cudaMemcpy(hz,dz, nbytes, cudaMemcpyDeviceToHost);
cout<<"GPU cost time="<<(double)(clock()-start)/CLOCKS_PER_SEC <<"s\t"<<
"last_num="<<hz[N-1]<<endl;
// 计算CPU的时间
start = clock();
for (i = 0; i < itr; i++) vec_add_host(hx, hy, hz, N);
cout<<"CPU cost time="<<(double)(clock()-start)/CLOCKS_PER_SEC <<"s\t"<<
"last_num="<<hz[N-1]<<endl;
// 释放内存
cudaFree(dx);
cudaFree(dy);
cudaFree(dz);
free(hx);
free(hy);
free(hz);
return 0;
}
/* warm up GPU */
__global__ void warmup_knl()
{
int i, j;
i = 1;
j = 2;
i = i + j;
}
void warmup()
{
int i;
for (i = 0; i < 8; i++) {
warmup_knl<<<1, 256>>>();
}
}
void vec_add_host(FLOAT *x,FLOAT *y,FLOAT *z,int N)
{
for(int i=0;i<N;i++)
{
z[i]=x[i]+y[i];
}
} | code for sm_80
Function : _Z10warmup_knlv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z7vec_addPfS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e280000002600 */
/*0020*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */
/* 0x000e280000002500 */
/*0030*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e620000002100 */
/*0040*/ IMAD R0, R0, c[0x0][0xc], R5 ; /* 0x0000030000007a24 */
/* 0x001fc800078e0205 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x002fca00078e0203 */
/*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */
/* 0x000fda0003f06270 */
/*0070*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0080*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0090*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*00a0*/ IMAD.WIDE R4, R0, R7, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fc800078e0207 */
/*00b0*/ IMAD.WIDE R2, R0.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x0c0fe400078e0207 */
/*00c0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00d0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00e0*/ IMAD.WIDE R6, R0, R7, c[0x0][0x170] ; /* 0x00005c0000067625 */
/* 0x000fc800078e0207 */
/*00f0*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */
/* 0x004fca0000000000 */
/*0100*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*0110*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0120*/ BRA 0x120; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /**
https://blog.csdn.net/Bruce_0712/article/details/64928442
cudaDeviceSynchronize():该方法将停止CPU端线程的执行,直到GPU端完成之前CUDA的任务,包括kernel函数、数据拷贝等。
cudaThreadSynchronize():该方法的作用和cudaDeviceSynchronize()基本相同,但它不是一个被推荐的方法,也许在后期版本的CUDA中会被删除。
cudaStreamSynchronize():这个方法接受一个stream ID,它将阻止CPU执行直到GPU端完成相应stream ID的所有CUDA任务,但其它stream中的CUDA任务可能执行完也可能没有执行完。
跨warp进行同步,您需要使用 __ syncthreads()
(1)在同一个warp内的线程读写shared/global 不需同步,
读写global和shared是立刻对本warp内的其他线程立刻可见的。
(2)在同一个block内的不同warp内线程读写shared/global 需同步,
这种读写必须使用__syncthreads(), 或者__threadfence()来实现不同的读写可见效果。
(3)在同一个grid内的不同block内的线程读写shared/gloabl 需要同步
这种读写必须使用__threadfence*()来实现一定的读写可见效果。
// 执行结果
[vec_add.cu:59] GPU 实现向量的加法
allocated 76.29 MB on GPU
GPU cost time=0.109505s last_num=4e+07
CPU cost time=2.46585s last_num=4e+07
*/
#include <iostream>
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <ctime>
#define mycout cout<<"["<<__FILE__<<":"<<__LINE__<<"] "
#define use_warmup 1
using namespace std;
typedef float FLOAT;
// GPU预热(可以提升GPU计算速度)
void warmup();
// CPU 向量加法
// __host__ void vec_add_host(FLOAT *x,FLOAT* y,FLOAT *z,int N);
// or host函数__host__可以省略
void vec_add_host(FLOAT *x,FLOAT *y,FLOAT *z,int N);
// GPU 函数
__global__ void vec_add(FLOAT *x,FLOAT *y,FLOAT *z,int N)
{
// 获取线程id(同时有很多个线程在执行这个函数,通过线程id区分)
/**
* <<<(256,256),256>>> grid 2维 block 1维 tid=threadIdx.x+blockDim.x*blockIdx.x+blockDim.x*gridDim.x*blockIdx.y
* <<<256,256>>> grid 1维 block 1维 tid=threadIdx.x+blockDim.x*blockIdx.x
* <<<1,256>>> grid 1维 block 1维 tid=threadIdx.x
* <<<256,1>>> grid 1维 block 1维 tid=blockDim.x*blockIdx.x
*/
int tid=threadIdx.x+blockDim.x*blockIdx.x+blockDim.x*gridDim.x*blockIdx.y;
if (tid<N) z[tid]=x[tid]+y[tid]; // 开的线程数必须大于数据总数,保证每个数据都能参与计算
// __syncthreads(); // 线程同步
}
int main(int argc, char *argv[])
{
mycout<<"GPU 实现向量的加法"<<endl;
int N = 20000000;
int nbytes = N * sizeof(FLOAT);
/* 1D block */
int bs = 256;
/* 2D grid */
// int s = ceil(sqrt((N + bs - 1.) / bs));
int s = ceil(sqrt(1.0*N / bs));
dim3 grid = dim3(s, s);
// dx 表示gpu变量,hx表示cpu变量
FLOAT *dx = NULL, *hx = NULL;
FLOAT *dy = NULL, *hy = NULL;
FLOAT *dz = NULL, *hz = NULL;
int itr = 30;
int i;
// double th, td;
/**======1、CPU 创建变量赋值==========*/
/* alllocate CPU mem */
hx = (FLOAT *) malloc(nbytes);
hy = (FLOAT *) malloc(nbytes);
hz = (FLOAT *) malloc(nbytes);
if (hx == NULL || hy == NULL || hz == NULL) {
// printf("couldn't allocate CPU memory\n");
mycout<<"couldn't allocate CPU memory"<<endl;
return -2;
}
/*给CPU变量赋值*/
// fill the arrays 'hx' and 'hy' on the CPU
for (int i=0; i<N; i++) {
hx[i] = i;
hy[i] = i ;
}
/* warm up GPU */
#if use_warmup
warmup(); // 预热
#endif // use_warmup
/**======2、GPU 分配内存======*/
/* allocate GPU mem */
cudaMalloc((void **)&dx, nbytes);
cudaMalloc((void **)&dy, nbytes);
cudaMalloc((void **)&dz, nbytes);
if (dx == NULL || dy == NULL || dz == NULL) {
// printf("couldn't allocate GPU memory\n");
mycout<<"couldn't allocate GPU memory"<<endl;
return -1;
}
printf("allocated %.2f MB on GPU\n", nbytes / (1024.f * 1024.f));
/**======3、将CPU数据拷贝给GPU======*/
/** copy data to GPU */
cudaMemcpy(dx, hx, nbytes, cudaMemcpyHostToDevice);
cudaMemcpy(dy, hy, nbytes, cudaMemcpyHostToDevice);
/* warm up GPU */
#if use_warmup
warmup(); // 预热
#endif // use_warmup
/**======4、调用GPU计算======*/
/* call GPU */
// cudaThreadSynchronize(); // 线程同步,等到前面的GPU数据拷贝完成
cudaDeviceSynchronize(); // cudaThreadSynchronize() 弃用了
clock_t start = clock();
for (i = 0; i < itr; i++) vec_add<<<grid, bs>>>(dx, dy, dz, N);
// cudaThreadSynchronize(); // 线程同步 等待所有线程处理完成
cudaDeviceSynchronize(); // cudaThreadSynchronize() 弃用了
/**======5、GPU计算结果拷贝给CPU======*/
cudaMemcpy(hz,dz, nbytes, cudaMemcpyDeviceToHost);
cout<<"GPU cost time="<<(double)(clock()-start)/CLOCKS_PER_SEC <<"s\t"<<
"last_num="<<hz[N-1]<<endl;
// 计算CPU的时间
start = clock();
for (i = 0; i < itr; i++) vec_add_host(hx, hy, hz, N);
cout<<"CPU cost time="<<(double)(clock()-start)/CLOCKS_PER_SEC <<"s\t"<<
"last_num="<<hz[N-1]<<endl;
// 释放内存
cudaFree(dx);
cudaFree(dy);
cudaFree(dz);
free(hx);
free(hy);
free(hz);
return 0;
}
/* warm up GPU */
__global__ void warmup_knl()
{
int i, j;
i = 1;
j = 2;
i = i + j;
}
void warmup()
{
int i;
for (i = 0; i < 8; i++) {
warmup_knl<<<1, 256>>>();
}
}
void vec_add_host(FLOAT *x,FLOAT *y,FLOAT *z,int N)
{
for(int i=0;i<N;i++)
{
z[i]=x[i]+y[i];
}
} | .file "tmpxft_000ed302_00000000-6_vec_add1.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3674:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3674:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z12vec_add_hostPfS_S_i
.type _Z12vec_add_hostPfS_S_i, @function
_Z12vec_add_hostPfS_S_i:
.LFB3671:
.cfi_startproc
endbr64
testl %ecx, %ecx
jle .L3
movslq %ecx, %rcx
salq $2, %rcx
movl $0, %eax
.L5:
movss (%rdi,%rax), %xmm0
addss (%rsi,%rax), %xmm0
movss %xmm0, (%rdx,%rax)
addq $4, %rax
cmpq %rcx, %rax
jne .L5
.L3:
ret
.cfi_endproc
.LFE3671:
.size _Z12vec_add_hostPfS_S_i, .-_Z12vec_add_hostPfS_S_i
.globl _Z31__device_stub__Z7vec_addPfS_S_iPfS_S_i
.type _Z31__device_stub__Z7vec_addPfS_S_iPfS_S_i, @function
_Z31__device_stub__Z7vec_addPfS_S_iPfS_S_i:
.LFB3696:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z7vec_addPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3696:
.size _Z31__device_stub__Z7vec_addPfS_S_iPfS_S_i, .-_Z31__device_stub__Z7vec_addPfS_S_iPfS_S_i
.globl _Z7vec_addPfS_S_i
.type _Z7vec_addPfS_S_i, @function
_Z7vec_addPfS_S_i:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z7vec_addPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _Z7vec_addPfS_S_i, .-_Z7vec_addPfS_S_i
.globl _Z29__device_stub__Z10warmup_knlvv
.type _Z29__device_stub__Z10warmup_knlvv, @function
_Z29__device_stub__Z10warmup_knlvv:
.LFB3698:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L19
.L15:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L20
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z10warmup_knlv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L15
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3698:
.size _Z29__device_stub__Z10warmup_knlvv, .-_Z29__device_stub__Z10warmup_knlvv
.globl _Z10warmup_knlv
.type _Z10warmup_knlv, @function
_Z10warmup_knlv:
.LFB3699:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z10warmup_knlvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3699:
.size _Z10warmup_knlv, .-_Z10warmup_knlv
.globl _Z6warmupv
.type _Z6warmupv, @function
_Z6warmupv:
.LFB3670:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $32, %rsp
.cfi_def_cfa_offset 48
movl $8, %ebx
jmp .L25
.L24:
subl $1, %ebx
je .L28
.L25:
movl $256, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L24
call _Z29__device_stub__Z10warmup_knlvv
jmp .L24
.L28:
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3670:
.size _Z6warmupv, .-_Z6warmupv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "["
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "/home/ubuntu/Datasets/stackv2/train-structured/wucng/Study/master/cuda/example/vec_add1.cu"
.section .rodata.str1.1
.LC2:
.string ":"
.LC3:
.string "] "
.LC4:
.string "GPU \345\256\236\347\216\260\345\220\221\351\207\217\347\232\204\345\212\240\346\263\225"
.LC5:
.string "couldn't allocate CPU memory"
.LC6:
.string "couldn't allocate GPU memory"
.LC8:
.string "allocated %.2f MB on GPU\n"
.LC9:
.string "GPU cost time="
.LC11:
.string "s\t"
.LC12:
.string "last_num="
.LC13:
.string "CPU cost time="
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $64, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC1(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC2(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $67, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC3(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC4(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $280, 32(%rsp)
movl $280, 36(%rsp)
movl $1, 40(%rsp)
movq $0, 8(%rsp)
movq $0, 16(%rsp)
movq $0, 24(%rsp)
movl $80000000, %edi
call malloc@PLT
movq %rax, %rbp
movl $80000000, %edi
call malloc@PLT
movq %rax, %rbx
movl $80000000, %edi
call malloc@PLT
movq %rax, %r13
testq %rbp, %rbp
sete %al
testq %rbx, %rbx
sete %dl
orb %dl, %al
jne .L40
testq %r13, %r13
je .L40
movl $0, %eax
.L30:
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, 0(%rbp,%rax,4)
movss %xmm0, (%rbx,%rax,4)
addq $1, %rax
cmpq $20000000, %rax
jne .L30
call _Z6warmupv
leaq 8(%rsp), %rdi
movl $80000000, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $80000000, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $80000000, %esi
call cudaMalloc@PLT
cmpq $0, 8(%rsp)
je .L33
cmpq $0, 16(%rsp)
je .L33
cmpq $0, 24(%rsp)
je .L33
movsd .LC7(%rip), %xmm0
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $1, %ecx
movl $80000000, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $80000000, %edx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
call _Z6warmupv
call cudaDeviceSynchronize@PLT
call clock@PLT
movq %rax, %r14
movl $30, %r12d
jmp .L36
.L40:
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC1(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC2(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $97, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC3(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC5(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $-2, %eax
jmp .L29
.L33:
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC1(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC2(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $121, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC3(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC6(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $-1, %eax
jmp .L29
.L35:
subl $1, %r12d
je .L45
.L36:
movl $256, 44(%rsp)
movl $1, 48(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl 40(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L35
movl $20000000, %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z31__device_stub__Z7vec_addPfS_S_iPfS_S_i
jmp .L35
.L45:
call cudaDeviceSynchronize@PLT
movl $2, %ecx
movl $80000000, %edx
movq 24(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
leaq .LC9(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %r12
call clock@PLT
subq %r14, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
divsd .LC10(%rip), %xmm0
movq %r12, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
leaq .LC11(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC12(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
pxor %xmm0, %xmm0
cvtss2sd 79999996(%r13), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
call clock@PLT
movq %rax, %r14
movl $30, %r12d
.L37:
movl $20000000, %ecx
movq %r13, %rdx
movq %rbx, %rsi
movq %rbp, %rdi
call _Z12vec_add_hostPfS_S_i
subl $1, %r12d
jne .L37
leaq .LC13(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %r12
call clock@PLT
subq %r14, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
divsd .LC10(%rip), %xmm0
movq %r12, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
leaq .LC11(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC12(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
pxor %xmm0, %xmm0
cvtss2sd 79999996(%r13), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movl $0, %eax
.L29:
movq 56(%rsp), %rdx
subq %fs:40, %rdx
jne .L46
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L46:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1
.LC14:
.string "_Z10warmup_knlv"
.LC15:
.string "_Z7vec_addPfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3701:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC14(%rip), %rdx
movq %rdx, %rcx
leaq _Z10warmup_knlv(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _Z7vec_addPfS_S_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3701:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC7:
.long 0
.long 1079186128
.align 8
.LC10:
.long 0
.long 1093567616
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /**
https://blog.csdn.net/Bruce_0712/article/details/64928442
cudaDeviceSynchronize():该方法将停止CPU端线程的执行,直到GPU端完成之前CUDA的任务,包括kernel函数、数据拷贝等。
cudaThreadSynchronize():该方法的作用和cudaDeviceSynchronize()基本相同,但它不是一个被推荐的方法,也许在后期版本的CUDA中会被删除。
cudaStreamSynchronize():这个方法接受一个stream ID,它将阻止CPU执行直到GPU端完成相应stream ID的所有CUDA任务,但其它stream中的CUDA任务可能执行完也可能没有执行完。
跨warp进行同步,您需要使用 __ syncthreads()
(1)在同一个warp内的线程读写shared/global 不需同步,
读写global和shared是立刻对本warp内的其他线程立刻可见的。
(2)在同一个block内的不同warp内线程读写shared/global 需同步,
这种读写必须使用__syncthreads(), 或者__threadfence()来实现不同的读写可见效果。
(3)在同一个grid内的不同block内的线程读写shared/gloabl 需要同步
这种读写必须使用__threadfence*()来实现一定的读写可见效果。
// 执行结果
[vec_add.cu:59] GPU 实现向量的加法
allocated 76.29 MB on GPU
GPU cost time=0.109505s last_num=4e+07
CPU cost time=2.46585s last_num=4e+07
*/
#include <iostream>
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <ctime>
#define mycout cout<<"["<<__FILE__<<":"<<__LINE__<<"] "
#define use_warmup 1
using namespace std;
typedef float FLOAT;
// GPU预热(可以提升GPU计算速度)
void warmup();
// CPU 向量加法
// __host__ void vec_add_host(FLOAT *x,FLOAT* y,FLOAT *z,int N);
// or host函数__host__可以省略
void vec_add_host(FLOAT *x,FLOAT *y,FLOAT *z,int N);
// GPU 函数
__global__ void vec_add(FLOAT *x,FLOAT *y,FLOAT *z,int N)
{
// 获取线程id(同时有很多个线程在执行这个函数,通过线程id区分)
/**
* <<<(256,256),256>>> grid 2维 block 1维 tid=threadIdx.x+blockDim.x*blockIdx.x+blockDim.x*gridDim.x*blockIdx.y
* <<<256,256>>> grid 1维 block 1维 tid=threadIdx.x+blockDim.x*blockIdx.x
* <<<1,256>>> grid 1维 block 1维 tid=threadIdx.x
* <<<256,1>>> grid 1维 block 1维 tid=blockDim.x*blockIdx.x
*/
int tid=threadIdx.x+blockDim.x*blockIdx.x+blockDim.x*gridDim.x*blockIdx.y;
if (tid<N) z[tid]=x[tid]+y[tid]; // 开的线程数必须大于数据总数,保证每个数据都能参与计算
// __syncthreads(); // 线程同步
}
int main(int argc, char *argv[])
{
mycout<<"GPU 实现向量的加法"<<endl;
int N = 20000000;
int nbytes = N * sizeof(FLOAT);
/* 1D block */
int bs = 256;
/* 2D grid */
// int s = ceil(sqrt((N + bs - 1.) / bs));
int s = ceil(sqrt(1.0*N / bs));
dim3 grid = dim3(s, s);
// dx 表示gpu变量,hx表示cpu变量
FLOAT *dx = NULL, *hx = NULL;
FLOAT *dy = NULL, *hy = NULL;
FLOAT *dz = NULL, *hz = NULL;
int itr = 30;
int i;
// double th, td;
/**======1、CPU 创建变量赋值==========*/
/* alllocate CPU mem */
hx = (FLOAT *) malloc(nbytes);
hy = (FLOAT *) malloc(nbytes);
hz = (FLOAT *) malloc(nbytes);
if (hx == NULL || hy == NULL || hz == NULL) {
// printf("couldn't allocate CPU memory\n");
mycout<<"couldn't allocate CPU memory"<<endl;
return -2;
}
/*给CPU变量赋值*/
// fill the arrays 'hx' and 'hy' on the CPU
for (int i=0; i<N; i++) {
hx[i] = i;
hy[i] = i ;
}
/* warm up GPU */
#if use_warmup
warmup(); // 预热
#endif // use_warmup
/**======2、GPU 分配内存======*/
/* allocate GPU mem */
cudaMalloc((void **)&dx, nbytes);
cudaMalloc((void **)&dy, nbytes);
cudaMalloc((void **)&dz, nbytes);
if (dx == NULL || dy == NULL || dz == NULL) {
// printf("couldn't allocate GPU memory\n");
mycout<<"couldn't allocate GPU memory"<<endl;
return -1;
}
printf("allocated %.2f MB on GPU\n", nbytes / (1024.f * 1024.f));
/**======3、将CPU数据拷贝给GPU======*/
/** copy data to GPU */
cudaMemcpy(dx, hx, nbytes, cudaMemcpyHostToDevice);
cudaMemcpy(dy, hy, nbytes, cudaMemcpyHostToDevice);
/* warm up GPU */
#if use_warmup
warmup(); // 预热
#endif // use_warmup
/**======4、调用GPU计算======*/
/* call GPU */
// cudaThreadSynchronize(); // 线程同步,等到前面的GPU数据拷贝完成
cudaDeviceSynchronize(); // cudaThreadSynchronize() 弃用了
clock_t start = clock();
for (i = 0; i < itr; i++) vec_add<<<grid, bs>>>(dx, dy, dz, N);
// cudaThreadSynchronize(); // 线程同步 等待所有线程处理完成
cudaDeviceSynchronize(); // cudaThreadSynchronize() 弃用了
/**======5、GPU计算结果拷贝给CPU======*/
cudaMemcpy(hz,dz, nbytes, cudaMemcpyDeviceToHost);
cout<<"GPU cost time="<<(double)(clock()-start)/CLOCKS_PER_SEC <<"s\t"<<
"last_num="<<hz[N-1]<<endl;
// 计算CPU的时间
start = clock();
for (i = 0; i < itr; i++) vec_add_host(hx, hy, hz, N);
cout<<"CPU cost time="<<(double)(clock()-start)/CLOCKS_PER_SEC <<"s\t"<<
"last_num="<<hz[N-1]<<endl;
// 释放内存
cudaFree(dx);
cudaFree(dy);
cudaFree(dz);
free(hx);
free(hy);
free(hz);
return 0;
}
/* warm up GPU */
__global__ void warmup_knl()
{
int i, j;
i = 1;
j = 2;
i = i + j;
}
void warmup()
{
int i;
for (i = 0; i < 8; i++) {
warmup_knl<<<1, 256>>>();
}
}
void vec_add_host(FLOAT *x,FLOAT *y,FLOAT *z,int N)
{
for(int i=0;i<N;i++)
{
z[i]=x[i]+y[i];
}
} | /**
https://blog.csdn.net/Bruce_0712/article/details/64928442
cudaDeviceSynchronize():该方法将停止CPU端线程的执行,直到GPU端完成之前CUDA的任务,包括kernel函数、数据拷贝等。
cudaThreadSynchronize():该方法的作用和cudaDeviceSynchronize()基本相同,但它不是一个被推荐的方法,也许在后期版本的CUDA中会被删除。
cudaStreamSynchronize():这个方法接受一个stream ID,它将阻止CPU执行直到GPU端完成相应stream ID的所有CUDA任务,但其它stream中的CUDA任务可能执行完也可能没有执行完。
跨warp进行同步,您需要使用 __ syncthreads()
(1)在同一个warp内的线程读写shared/global 不需同步,
读写global和shared是立刻对本warp内的其他线程立刻可见的。
(2)在同一个block内的不同warp内线程读写shared/global 需同步,
这种读写必须使用__syncthreads(), 或者__threadfence()来实现不同的读写可见效果。
(3)在同一个grid内的不同block内的线程读写shared/gloabl 需要同步
这种读写必须使用__threadfence*()来实现一定的读写可见效果。
// 执行结果
[vec_add.cu:59] GPU 实现向量的加法
allocated 76.29 MB on GPU
GPU cost time=0.109505s last_num=4e+07
CPU cost time=2.46585s last_num=4e+07
*/
#include <iostream>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <ctime>
#define mycout cout<<"["<<__FILE__<<":"<<__LINE__<<"] "
#define use_warmup 1
using namespace std;
typedef float FLOAT;
// GPU预热(可以提升GPU计算速度)
void warmup();
// CPU 向量加法
// __host__ void vec_add_host(FLOAT *x,FLOAT* y,FLOAT *z,int N);
// or host函数__host__可以省略
void vec_add_host(FLOAT *x,FLOAT *y,FLOAT *z,int N);
// GPU 函数
__global__ void vec_add(FLOAT *x,FLOAT *y,FLOAT *z,int N)
{
// 获取线程id(同时有很多个线程在执行这个函数,通过线程id区分)
/**
* <<<(256,256),256>>> grid 2维 block 1维 tid=threadIdx.x+blockDim.x*blockIdx.x+blockDim.x*gridDim.x*blockIdx.y
* <<<256,256>>> grid 1维 block 1维 tid=threadIdx.x+blockDim.x*blockIdx.x
* <<<1,256>>> grid 1维 block 1维 tid=threadIdx.x
* <<<256,1>>> grid 1维 block 1维 tid=blockDim.x*blockIdx.x
*/
int tid=threadIdx.x+blockDim.x*blockIdx.x+blockDim.x*gridDim.x*blockIdx.y;
if (tid<N) z[tid]=x[tid]+y[tid]; // 开的线程数必须大于数据总数,保证每个数据都能参与计算
// __syncthreads(); // 线程同步
}
int main(int argc, char *argv[])
{
mycout<<"GPU 实现向量的加法"<<endl;
int N = 20000000;
int nbytes = N * sizeof(FLOAT);
/* 1D block */
int bs = 256;
/* 2D grid */
// int s = ceil(sqrt((N + bs - 1.) / bs));
int s = ceil(sqrt(1.0*N / bs));
dim3 grid = dim3(s, s);
// dx 表示gpu变量,hx表示cpu变量
FLOAT *dx = NULL, *hx = NULL;
FLOAT *dy = NULL, *hy = NULL;
FLOAT *dz = NULL, *hz = NULL;
int itr = 30;
int i;
// double th, td;
/**======1、CPU 创建变量赋值==========*/
/* alllocate CPU mem */
hx = (FLOAT *) malloc(nbytes);
hy = (FLOAT *) malloc(nbytes);
hz = (FLOAT *) malloc(nbytes);
if (hx == NULL || hy == NULL || hz == NULL) {
// printf("couldn't allocate CPU memory\n");
mycout<<"couldn't allocate CPU memory"<<endl;
return -2;
}
/*给CPU变量赋值*/
// fill the arrays 'hx' and 'hy' on the CPU
for (int i=0; i<N; i++) {
hx[i] = i;
hy[i] = i ;
}
/* warm up GPU */
#if use_warmup
warmup(); // 预热
#endif // use_warmup
/**======2、GPU 分配内存======*/
/* allocate GPU mem */
hipMalloc((void **)&dx, nbytes);
hipMalloc((void **)&dy, nbytes);
hipMalloc((void **)&dz, nbytes);
if (dx == NULL || dy == NULL || dz == NULL) {
// printf("couldn't allocate GPU memory\n");
mycout<<"couldn't allocate GPU memory"<<endl;
return -1;
}
printf("allocated %.2f MB on GPU\n", nbytes / (1024.f * 1024.f));
/**======3、将CPU数据拷贝给GPU======*/
/** copy data to GPU */
hipMemcpy(dx, hx, nbytes, hipMemcpyHostToDevice);
hipMemcpy(dy, hy, nbytes, hipMemcpyHostToDevice);
/* warm up GPU */
#if use_warmup
warmup(); // 预热
#endif // use_warmup
/**======4、调用GPU计算======*/
/* call GPU */
// cudaThreadSynchronize(); // 线程同步,等到前面的GPU数据拷贝完成
hipDeviceSynchronize(); // cudaThreadSynchronize() 弃用了
clock_t start = clock();
for (i = 0; i < itr; i++) vec_add<<<grid, bs>>>(dx, dy, dz, N);
// cudaThreadSynchronize(); // 线程同步 等待所有线程处理完成
hipDeviceSynchronize(); // cudaThreadSynchronize() 弃用了
/**======5、GPU计算结果拷贝给CPU======*/
hipMemcpy(hz,dz, nbytes, hipMemcpyDeviceToHost);
cout<<"GPU cost time="<<(double)(clock()-start)/CLOCKS_PER_SEC <<"s\t"<<
"last_num="<<hz[N-1]<<endl;
// 计算CPU的时间
start = clock();
for (i = 0; i < itr; i++) vec_add_host(hx, hy, hz, N);
cout<<"CPU cost time="<<(double)(clock()-start)/CLOCKS_PER_SEC <<"s\t"<<
"last_num="<<hz[N-1]<<endl;
// 释放内存
hipFree(dx);
hipFree(dy);
hipFree(dz);
free(hx);
free(hy);
free(hz);
return 0;
}
/* warm up GPU */
__global__ void warmup_knl()
{
int i, j;
i = 1;
j = 2;
i = i + j;
}
void warmup()
{
int i;
for (i = 0; i < 8; i++) {
warmup_knl<<<1, 256>>>();
}
}
void vec_add_host(FLOAT *x,FLOAT *y,FLOAT *z,int N)
{
for(int i=0;i<N;i++)
{
z[i]=x[i]+y[i];
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /**
https://blog.csdn.net/Bruce_0712/article/details/64928442
cudaDeviceSynchronize():该方法将停止CPU端线程的执行,直到GPU端完成之前CUDA的任务,包括kernel函数、数据拷贝等。
cudaThreadSynchronize():该方法的作用和cudaDeviceSynchronize()基本相同,但它不是一个被推荐的方法,也许在后期版本的CUDA中会被删除。
cudaStreamSynchronize():这个方法接受一个stream ID,它将阻止CPU执行直到GPU端完成相应stream ID的所有CUDA任务,但其它stream中的CUDA任务可能执行完也可能没有执行完。
跨warp进行同步,您需要使用 __ syncthreads()
(1)在同一个warp内的线程读写shared/global 不需同步,
读写global和shared是立刻对本warp内的其他线程立刻可见的。
(2)在同一个block内的不同warp内线程读写shared/global 需同步,
这种读写必须使用__syncthreads(), 或者__threadfence()来实现不同的读写可见效果。
(3)在同一个grid内的不同block内的线程读写shared/gloabl 需要同步
这种读写必须使用__threadfence*()来实现一定的读写可见效果。
// 执行结果
[vec_add.cu:59] GPU 实现向量的加法
allocated 76.29 MB on GPU
GPU cost time=0.109505s last_num=4e+07
CPU cost time=2.46585s last_num=4e+07
*/
#include <iostream>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <ctime>
#define mycout cout<<"["<<__FILE__<<":"<<__LINE__<<"] "
#define use_warmup 1
using namespace std;
typedef float FLOAT;
// GPU预热(可以提升GPU计算速度)
void warmup();
// CPU 向量加法
// __host__ void vec_add_host(FLOAT *x,FLOAT* y,FLOAT *z,int N);
// or host函数__host__可以省略
void vec_add_host(FLOAT *x,FLOAT *y,FLOAT *z,int N);
// GPU 函数
__global__ void vec_add(FLOAT *x,FLOAT *y,FLOAT *z,int N)
{
// 获取线程id(同时有很多个线程在执行这个函数,通过线程id区分)
/**
* <<<(256,256),256>>> grid 2维 block 1维 tid=threadIdx.x+blockDim.x*blockIdx.x+blockDim.x*gridDim.x*blockIdx.y
* <<<256,256>>> grid 1维 block 1维 tid=threadIdx.x+blockDim.x*blockIdx.x
* <<<1,256>>> grid 1维 block 1维 tid=threadIdx.x
* <<<256,1>>> grid 1维 block 1维 tid=blockDim.x*blockIdx.x
*/
int tid=threadIdx.x+blockDim.x*blockIdx.x+blockDim.x*gridDim.x*blockIdx.y;
if (tid<N) z[tid]=x[tid]+y[tid]; // 开的线程数必须大于数据总数,保证每个数据都能参与计算
// __syncthreads(); // 线程同步
}
int main(int argc, char *argv[])
{
mycout<<"GPU 实现向量的加法"<<endl;
int N = 20000000;
int nbytes = N * sizeof(FLOAT);
/* 1D block */
int bs = 256;
/* 2D grid */
// int s = ceil(sqrt((N + bs - 1.) / bs));
int s = ceil(sqrt(1.0*N / bs));
dim3 grid = dim3(s, s);
// dx 表示gpu变量,hx表示cpu变量
FLOAT *dx = NULL, *hx = NULL;
FLOAT *dy = NULL, *hy = NULL;
FLOAT *dz = NULL, *hz = NULL;
int itr = 30;
int i;
// double th, td;
/**======1、CPU 创建变量赋值==========*/
/* alllocate CPU mem */
hx = (FLOAT *) malloc(nbytes);
hy = (FLOAT *) malloc(nbytes);
hz = (FLOAT *) malloc(nbytes);
if (hx == NULL || hy == NULL || hz == NULL) {
// printf("couldn't allocate CPU memory\n");
mycout<<"couldn't allocate CPU memory"<<endl;
return -2;
}
/*给CPU变量赋值*/
// fill the arrays 'hx' and 'hy' on the CPU
for (int i=0; i<N; i++) {
hx[i] = i;
hy[i] = i ;
}
/* warm up GPU */
#if use_warmup
warmup(); // 预热
#endif // use_warmup
/**======2、GPU 分配内存======*/
/* allocate GPU mem */
hipMalloc((void **)&dx, nbytes);
hipMalloc((void **)&dy, nbytes);
hipMalloc((void **)&dz, nbytes);
if (dx == NULL || dy == NULL || dz == NULL) {
// printf("couldn't allocate GPU memory\n");
mycout<<"couldn't allocate GPU memory"<<endl;
return -1;
}
printf("allocated %.2f MB on GPU\n", nbytes / (1024.f * 1024.f));
/**======3、将CPU数据拷贝给GPU======*/
/** copy data to GPU */
hipMemcpy(dx, hx, nbytes, hipMemcpyHostToDevice);
hipMemcpy(dy, hy, nbytes, hipMemcpyHostToDevice);
/* warm up GPU */
#if use_warmup
warmup(); // 预热
#endif // use_warmup
/**======4、调用GPU计算======*/
/* call GPU */
// cudaThreadSynchronize(); // 线程同步,等到前面的GPU数据拷贝完成
hipDeviceSynchronize(); // cudaThreadSynchronize() 弃用了
clock_t start = clock();
for (i = 0; i < itr; i++) vec_add<<<grid, bs>>>(dx, dy, dz, N);
// cudaThreadSynchronize(); // 线程同步 等待所有线程处理完成
hipDeviceSynchronize(); // cudaThreadSynchronize() 弃用了
/**======5、GPU计算结果拷贝给CPU======*/
hipMemcpy(hz,dz, nbytes, hipMemcpyDeviceToHost);
cout<<"GPU cost time="<<(double)(clock()-start)/CLOCKS_PER_SEC <<"s\t"<<
"last_num="<<hz[N-1]<<endl;
// 计算CPU的时间
start = clock();
for (i = 0; i < itr; i++) vec_add_host(hx, hy, hz, N);
cout<<"CPU cost time="<<(double)(clock()-start)/CLOCKS_PER_SEC <<"s\t"<<
"last_num="<<hz[N-1]<<endl;
// 释放内存
hipFree(dx);
hipFree(dy);
hipFree(dz);
free(hx);
free(hy);
free(hz);
return 0;
}
/* warm up GPU */
__global__ void warmup_knl()
{
int i, j;
i = 1;
j = 2;
i = i + j;
}
void warmup()
{
int i;
for (i = 0; i < 8; i++) {
warmup_knl<<<1, 256>>>();
}
}
// CPU reference implementation: z[i] = x[i] + y[i] for every i in [0, N).
void vec_add_host(FLOAT *x,FLOAT *y,FLOAT *z,int N)
{
for(int i=0;i<N;i++)
{
z[i]=x[i]+y[i];
}
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z7vec_addPfS_S_i
.globl _Z7vec_addPfS_S_i
.p2align 8
.type _Z7vec_addPfS_S_i,@function
_Z7vec_addPfS_S_i:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x20
s_load_b32 s3, s[0:1], 0x2c
s_load_b32 s4, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_mul_i32 s2, s2, s15
s_and_b32 s3, s3, 0xffff
s_add_i32 s2, s2, s14
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s2, s3, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s4, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z7vec_addPfS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z7vec_addPfS_S_i, .Lfunc_end0-_Z7vec_addPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z10warmup_knlv
.globl _Z10warmup_knlv
.p2align 8
.type _Z10warmup_knlv,@function
_Z10warmup_knlv:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10warmup_knlv
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 0
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 0
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z10warmup_knlv, .Lfunc_end1-_Z10warmup_knlv
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z7vec_addPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z7vec_addPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args: []
.group_segment_fixed_size: 0
.kernarg_segment_align: 4
.kernarg_segment_size: 0
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10warmup_knlv
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z10warmup_knlv.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /**
https://blog.csdn.net/Bruce_0712/article/details/64928442
cudaDeviceSynchronize():该方法将停止CPU端线程的执行,直到GPU端完成之前CUDA的任务,包括kernel函数、数据拷贝等。
cudaThreadSynchronize():该方法的作用和cudaDeviceSynchronize()基本相同,但它不是一个被推荐的方法,也许在后期版本的CUDA中会被删除。
cudaStreamSynchronize():这个方法接受一个stream ID,它将阻止CPU执行直到GPU端完成相应stream ID的所有CUDA任务,但其它stream中的CUDA任务可能执行完也可能没有执行完。
跨warp进行同步,您需要使用 __ syncthreads()
(1)在同一个warp内的线程读写shared/global 不需同步,
读写global和shared是立刻对本warp内的其他线程立刻可见的。
(2)在同一个block内的不同warp内线程读写shared/global 需同步,
这种读写必须使用__syncthreads(), 或者__threadfence()来实现不同的读写可见效果。
(3)在同一个grid内的不同block内的线程读写shared/gloabl 需要同步
这种读写必须使用__threadfence*()来实现一定的读写可见效果。
// 执行结果
[vec_add.cu:59] GPU 实现向量的加法
allocated 76.29 MB on GPU
GPU cost time=0.109505s last_num=4e+07
CPU cost time=2.46585s last_num=4e+07
*/
#include <iostream>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <ctime>
#define mycout cout<<"["<<__FILE__<<":"<<__LINE__<<"] "
#define use_warmup 1
using namespace std;
typedef float FLOAT;
// GPU预热(可以提升GPU计算速度)
void warmup();
// CPU 向量加法
// __host__ void vec_add_host(FLOAT *x,FLOAT* y,FLOAT *z,int N);
// or host函数__host__可以省略
void vec_add_host(FLOAT *x,FLOAT *y,FLOAT *z,int N);
// GPU 函数
// GPU kernel: element-wise vector addition, z[tid] = x[tid] + y[tid].
__global__ void vec_add(FLOAT *x,FLOAT *y,FLOAT *z,int N)
{
// Compute this thread's global id (many threads run this function
// concurrently; the id tells each one which element to process).
/**
 * <<<(256,256),256>>> 2-D grid, 1-D block: tid=threadIdx.x+blockDim.x*blockIdx.x+blockDim.x*gridDim.x*blockIdx.y
 * <<<256,256>>>       1-D grid, 1-D block: tid=threadIdx.x+blockDim.x*blockIdx.x
 * <<<1,256>>>         1-D grid, 1-D block: tid=threadIdx.x
 * <<<256,1>>>         1-D grid, 1-D block: tid=blockDim.x*blockIdx.x
 */
int tid=threadIdx.x+blockDim.x*blockIdx.x+blockDim.x*gridDim.x*blockIdx.y;
if (tid<N) z[tid]=x[tid]+y[tid]; // launch must supply >= N threads so every element is covered; excess threads are masked off here
// __syncthreads(); // block-level barrier (not needed: no shared-memory communication)
}
/**
 * Vector-addition benchmark: times `itr` GPU launches of vec_add against
 * `itr` runs of the CPU reference vec_add_host on N-element float vectors.
 *
 * Returns 0 on success, -2 when host allocation fails, -1 when device
 * allocation fails (same codes as before; the failure paths now release
 * whatever was already allocated instead of leaking it, and the HIP API
 * status codes are checked rather than relying on pointer-NULL tests).
 */
int main(int argc, char *argv[])
{
    mycout<<"GPU 实现向量的加法"<<endl;
    int N = 20000000;
    size_t nbytes = (size_t)N * sizeof(FLOAT); // size_t: correct type for allocation sizes
    /* 1D block */
    int bs = 256;
    /* 2D grid: s*s blocks of bs threads must cover all N elements */
    int s = ceil(sqrt(1.0*N / bs));
    dim3 grid = dim3(s, s);
    // d* are device pointers, h* are host pointers
    FLOAT *dx = NULL, *hx = NULL;
    FLOAT *dy = NULL, *hy = NULL;
    FLOAT *dz = NULL, *hz = NULL;
    int itr = 30;
    int i;
    /**======1. allocate and initialise CPU memory==========*/
    hx = (FLOAT *) malloc(nbytes);
    hy = (FLOAT *) malloc(nbytes);
    hz = (FLOAT *) malloc(nbytes);
    if (hx == NULL || hy == NULL || hz == NULL) {
        mycout<<"couldn't allocate CPU memory"<<endl;
        free(hx); free(hy); free(hz); // free(NULL) is a no-op, so partial success is handled
        return -2;
    }
    // fill the input arrays 'hx' and 'hy' on the CPU
    for (int i=0; i<N; i++) {
        hx[i] = i;
        hy[i] = i;
    }
    /* warm up GPU */
#if use_warmup
    warmup();
#endif // use_warmup
    /**======2. allocate GPU memory (check API status)======*/
    hipError_t err = hipSuccess;
    if (err == hipSuccess) err = hipMalloc((void **)&dx, nbytes);
    if (err == hipSuccess) err = hipMalloc((void **)&dy, nbytes);
    if (err == hipSuccess) err = hipMalloc((void **)&dz, nbytes);
    if (err != hipSuccess || dx == NULL || dy == NULL || dz == NULL) {
        mycout<<"couldn't allocate GPU memory"<<endl;
        hipFree(dx); hipFree(dy); hipFree(dz); // hipFree(NULL) is a no-op
        free(hx); free(hy); free(hz);
        return -1;
    }
    printf("allocated %.2f MB on GPU\n", nbytes / (1024.f * 1024.f));
    /**======3. copy input data to the GPU======*/
    if (hipMemcpy(dx, hx, nbytes, hipMemcpyHostToDevice) != hipSuccess ||
        hipMemcpy(dy, hy, nbytes, hipMemcpyHostToDevice) != hipSuccess) {
        mycout<<"couldn't allocate GPU memory"<<endl;
        hipFree(dx); hipFree(dy); hipFree(dz);
        free(hx); free(hy); free(hz);
        return -1;
    }
    /* warm up GPU */
#if use_warmup
    warmup();
#endif // use_warmup
    /**======4. launch and time the GPU version======*/
    hipDeviceSynchronize(); // ensure copies/warmup have finished before timing starts
    clock_t start = clock();
    for (i = 0; i < itr; i++) vec_add<<<grid, bs>>>(dx, dy, dz, N);
    hipDeviceSynchronize(); // kernel launches are asynchronous; wait before stopping the clock
    /**======5. copy the GPU result back to the CPU======*/
    hipMemcpy(hz, dz, nbytes, hipMemcpyDeviceToHost);
    cout<<"GPU cost time="<<(double)(clock()-start)/CLOCKS_PER_SEC <<"s\t"<<
    "last_num="<<hz[N-1]<<endl;
    // time the CPU reference implementation
    start = clock();
    for (i = 0; i < itr; i++) vec_add_host(hx, hy, hz, N);
    cout<<"CPU cost time="<<(double)(clock()-start)/CLOCKS_PER_SEC <<"s\t"<<
    "last_num="<<hz[N-1]<<endl;
    // release device and host memory
    hipFree(dx);
    hipFree(dy);
    hipFree(dz);
    free(hx);
    free(hy);
    free(hz);
    return 0;
}
/* warm up GPU */
// Dummy kernel: the launch itself (not the discarded arithmetic) warms up
// the GPU runtime before timed work; it has no observable effect.
__global__ void warmup_knl()
{
    int sum = 1 + 2; // result intentionally unused
    (void)sum;
}
// Issue a handful of tiny kernel launches so later timed launches do not
// include one-time GPU/driver initialisation cost.
void warmup()
{
    for (int k = 0; k < 8; ++k) {
        warmup_knl<<<1, 256>>>();
    }
}
// CPU reference implementation: z[i] = x[i] + y[i] for every i in [0, N).
void vec_add_host(FLOAT *x, FLOAT *y, FLOAT *z, int N)
{
    int idx = 0;
    while (idx < N) {
        z[idx] = x[idx] + y[idx];
        ++idx;
    }
}
.file "vec_add1.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z22__device_stub__vec_addPfS_S_i # -- Begin function _Z22__device_stub__vec_addPfS_S_i
.p2align 4, 0x90
.type _Z22__device_stub__vec_addPfS_S_i,@function
_Z22__device_stub__vec_addPfS_S_i: # @_Z22__device_stub__vec_addPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z7vec_addPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z22__device_stub__vec_addPfS_S_i, .Lfunc_end0-_Z22__device_stub__vec_addPfS_S_i
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI1_0:
.quad 0x405312d000000000 # double 76.2939453125
.LCPI1_1:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $152, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $1, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $101, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl $.L.str.2, %esi
movl $1, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl $67, %esi
callq _ZNSolsEi
movq %rax, %rbx
movl $.L.str.3, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $.L.str.4, %esi
movl $25, %edx
movq %rbx, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq (%rbx), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %r14
testq %r14, %r14
je .LBB1_40
# %bb.1: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%r14)
je .LBB1_3
# %bb.2:
movzbl 67(%r14), %eax
jmp .LBB1_4
.LBB1_3:
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB1_4: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movq %rbx, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq $0, 16(%rsp)
movq $0, 8(%rsp)
movq $0, (%rsp)
movl $80000000, %edi # imm = 0x4C4B400
callq malloc
movq %rax, %rbx
movl $80000000, %edi # imm = 0x4C4B400
callq malloc
movq %rax, %r14
movl $80000000, %edi # imm = 0x4C4B400
callq malloc
testq %rbx, %rbx
je .LBB1_15
# %bb.5: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
testq %r14, %r14
je .LBB1_15
# %bb.6: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movq %rax, %r15
testq %rax, %rax
je .LBB1_15
# %bb.7: # %.preheader.preheader
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_8: # %.preheader
# =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%rbx,%rax,4)
movss %xmm0, (%r14,%rax,4)
incq %rax
cmpq $20000000, %rax # imm = 0x1312D00
jne .LBB1_8
# %bb.9:
callq _Z6warmupv
leaq 16(%rsp), %rdi
movl $80000000, %esi # imm = 0x4C4B400
callq hipMalloc
leaq 8(%rsp), %rdi
movl $80000000, %esi # imm = 0x4C4B400
callq hipMalloc
movq %rsp, %rdi
movl $80000000, %esi # imm = 0x4C4B400
callq hipMalloc
cmpq $0, 16(%rsp)
je .LBB1_12
# %bb.10:
cmpq $0, 8(%rsp)
je .LBB1_12
# %bb.11:
cmpq $0, (%rsp)
je .LBB1_12
# %bb.22:
movsd .LCPI1_0(%rip), %xmm0 # xmm0 = mem[0],zero
movl $.L.str.7, %edi
movb $1, %al
callq printf
movq 16(%rsp), %rdi
movl $80000000, %edx # imm = 0x4C4B400
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movl $80000000, %edx # imm = 0x4C4B400
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
callq _Z6warmupv
callq hipDeviceSynchronize
movl $30, %r12d
callq clock
movq %rax, 32(%rsp) # 8-byte Spill
movabsq $1202590843160, %r13 # imm = 0x11800000118
movabsq $4294967552, %rbp # imm = 0x100000100
jmp .LBB1_23
.p2align 4, 0x90
.LBB1_25: # in Loop: Header=BB1_23 Depth=1
decl %r12d
je .LBB1_26
.LBB1_23: # =>This Inner Loop Header: Depth=1
movq %r13, %rdi
movl $1, %esi
movq %rbp, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_25
# %bb.24: # in Loop: Header=BB1_23 Depth=1
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl $20000000, 28(%rsp) # imm = 0x1312D00
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 28(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
movl $_Z7vec_addPfS_S_i, %edi
leaq 112(%rsp), %r9
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB1_25
.LBB1_15:
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $1, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $101, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl $.L.str.2, %esi
movl $1, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl $97, %esi
callq _ZNSolsEi
movq %rax, %rbx
movl $.L.str.3, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $.L.str.5, %esi
movl $28, %edx
movq %rbx, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq (%rbx), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %r14
testq %r14, %r14
je .LBB1_40
# %bb.16: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i68
cmpb $0, 56(%r14)
je .LBB1_18
# %bb.17:
movzbl 67(%r14), %eax
jmp .LBB1_19
.LBB1_12:
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $1, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $101, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl $.L.str.2, %esi
movl $1, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl $121, %esi
callq _ZNSolsEi
movq %rax, %rbx
movl $.L.str.3, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $.L.str.6, %esi
movl $28, %edx
movq %rbx, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq (%rbx), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %r14
testq %r14, %r14
je .LBB1_40
# %bb.13: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i73
cmpb $0, 56(%r14)
je .LBB1_20
# %bb.14:
movzbl 67(%r14), %eax
jmp .LBB1_21
.LBB1_26:
callq hipDeviceSynchronize
movq (%rsp), %rsi
movl $80000000, %edx # imm = 0x4C4B400
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
movl $_ZSt4cout, %edi
movl $.L.str.8, %esi
movl $14, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
callq clock
subq 32(%rsp), %rax # 8-byte Folded Reload
xorps %xmm0, %xmm0
cvtsi2sd %rax, %xmm0
divsd .LCPI1_1(%rip), %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq %rax, %r12
movl $.L.str.9, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $.L.str.10, %esi
movl $9, %edx
movq %r12, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movss 79999996(%r15), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movq %r12, %rdi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r12
testq %r12, %r12
je .LBB1_40
# %bb.27: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i78
cmpb $0, 56(%r12)
je .LBB1_29
# %bb.28:
movzbl 67(%r12), %ecx
jmp .LBB1_30
.LBB1_18:
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB1_19: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit71
movsbl %al, %esi
movq %rbx, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $-2, %eax
jmp .LBB1_39
.LBB1_20:
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB1_21: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit76
movsbl %al, %esi
movq %rbx, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $-1, %eax
jmp .LBB1_39
.LBB1_29:
movq %r12, %rdi
movq %rax, %r13
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r13, %rax
.LBB1_30: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit81
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
xorl %r13d, %r13d
callq clock
movq %rax, %r12
.p2align 4, 0x90
.LBB1_31: # %.lr.ph.i.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_32 Depth 2
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_32: # %.lr.ph.i
# Parent Loop BB1_31 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rbx,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
addss (%r14,%rax,4), %xmm0
movss %xmm0, (%r15,%rax,4)
incq %rax
cmpq $20000000, %rax # imm = 0x1312D00
jne .LBB1_32
# %bb.33: # %_Z12vec_add_hostPfS_S_i.exit
# in Loop: Header=BB1_31 Depth=1
incl %r13d
cmpl $30, %r13d
jne .LBB1_31
# %bb.34:
movl $_ZSt4cout, %edi
movl $.L.str.11, %esi
movl $14, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
callq clock
subq %r12, %rax
xorps %xmm0, %xmm0
cvtsi2sd %rax, %xmm0
divsd .LCPI1_1(%rip), %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq %rax, %r12
movl $.L.str.9, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $.L.str.10, %esi
movl $9, %edx
movq %r12, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movss 79999996(%r15), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movq %r12, %rdi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r12
testq %r12, %r12
je .LBB1_40
# %bb.35: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i83
cmpb $0, 56(%r12)
je .LBB1_37
# %bb.36:
movzbl 67(%r12), %ecx
jmp .LBB1_38
.LBB1_37:
movq %r12, %rdi
movq %rax, %r13
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r13, %rax
.LBB1_38: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit86
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
xorl %eax, %eax
.LBB1_39:
addq $152, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_40:
.cfi_def_cfa_offset 208
callq _ZSt16__throw_bad_castv
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.globl _Z6warmupv # -- Begin function _Z6warmupv
.p2align 4, 0x90
.type _Z6warmupv,@function
_Z6warmupv: # @_Z6warmupv
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $56, %rsp
.cfi_def_cfa_offset 112
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movabsq $4294967297, %rbx # imm = 0x100000001
movl $8, %r12d
leaq 255(%rbx), %r14
leaq 8(%rsp), %r13
movq %rsp, %rbp
leaq 48(%rsp), %r15
jmp .LBB2_1
.p2align 4, 0x90
.LBB2_3: # in Loop: Header=BB2_1 Depth=1
decl %r12d
je .LBB2_4
.LBB2_1: # =>This Inner Loop Header: Depth=1
movq %rbx, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_3
# %bb.2: # in Loop: Header=BB2_1 Depth=1
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
movq %r13, %rdx
movq %rbp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
movl $_Z10warmup_knlv, %edi
movq %r15, %r9
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB2_3
.LBB2_4:
addq $56, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z6warmupv, .Lfunc_end2-_Z6warmupv
.cfi_endproc
# -- End function
.globl _Z12vec_add_hostPfS_S_i # -- Begin function _Z12vec_add_hostPfS_S_i
.p2align 4, 0x90
.type _Z12vec_add_hostPfS_S_i,@function
_Z12vec_add_hostPfS_S_i: # @_Z12vec_add_hostPfS_S_i
.cfi_startproc
# %bb.0:
testl %ecx, %ecx
jle .LBB3_3
# %bb.1: # %.lr.ph.preheader
movl %ecx, %eax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB3_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movss (%rdi,%rcx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
addss (%rsi,%rcx,4), %xmm0
movss %xmm0, (%rdx,%rcx,4)
incq %rcx
cmpq %rcx, %rax
jne .LBB3_2
.LBB3_3: # %._crit_edge
retq
.Lfunc_end3:
.size _Z12vec_add_hostPfS_S_i, .Lfunc_end3-_Z12vec_add_hostPfS_S_i
.cfi_endproc
# -- End function
.globl _Z25__device_stub__warmup_knlv # -- Begin function _Z25__device_stub__warmup_knlv
.p2align 4, 0x90
.type _Z25__device_stub__warmup_knlv,@function
_Z25__device_stub__warmup_knlv: # @_Z25__device_stub__warmup_knlv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z10warmup_knlv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end4:
.size _Z25__device_stub__warmup_knlv, .Lfunc_end4-_Z25__device_stub__warmup_knlv
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB5_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB5_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7vec_addPfS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10warmup_knlv, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end5:
.size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB6_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB6_2:
retq
.Lfunc_end6:
.size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z7vec_addPfS_S_i,@object # @_Z7vec_addPfS_S_i
.section .rodata,"a",@progbits
.globl _Z7vec_addPfS_S_i
.p2align 3, 0x0
_Z7vec_addPfS_S_i:
.quad _Z22__device_stub__vec_addPfS_S_i
.size _Z7vec_addPfS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "["
.size .L.str, 2
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/wucng/Study/master/cuda/example/vec_add1.hip"
.size .L.str.1, 102
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz ":"
.size .L.str.2, 2
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "] "
.size .L.str.3, 3
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "GPU \345\256\236\347\216\260\345\220\221\351\207\217\347\232\204\345\212\240\346\263\225"
.size .L.str.4, 26
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "couldn't allocate CPU memory"
.size .L.str.5, 29
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "couldn't allocate GPU memory"
.size .L.str.6, 29
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "allocated %.2f MB on GPU\n"
.size .L.str.7, 26
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "GPU cost time="
.size .L.str.8, 15
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "s\t"
.size .L.str.9, 3
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "last_num="
.size .L.str.10, 10
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "CPU cost time="
.size .L.str.11, 15
.type _Z10warmup_knlv,@object # @_Z10warmup_knlv
.section .rodata,"a",@progbits
.globl _Z10warmup_knlv
.p2align 3, 0x0
_Z10warmup_knlv:
.quad _Z25__device_stub__warmup_knlv
.size _Z10warmup_knlv, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z7vec_addPfS_S_i"
.size .L__unnamed_1, 18
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z10warmup_knlv"
.size .L__unnamed_2, 16
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__vec_addPfS_S_i
.addrsig_sym _Z25__device_stub__warmup_knlv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z7vec_addPfS_S_i
.addrsig_sym _ZSt4cout
.addrsig_sym _Z10warmup_knlv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z10warmup_knlv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z7vec_addPfS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e280000002600 */
/*0020*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */
/* 0x000e280000002500 */
/*0030*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e620000002100 */
/*0040*/ IMAD R0, R0, c[0x0][0xc], R5 ; /* 0x0000030000007a24 */
/* 0x001fc800078e0205 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x002fca00078e0203 */
/*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */
/* 0x000fda0003f06270 */
/*0070*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0080*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0090*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*00a0*/ IMAD.WIDE R4, R0, R7, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fc800078e0207 */
/*00b0*/ IMAD.WIDE R2, R0.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x0c0fe400078e0207 */
/*00c0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00d0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00e0*/ IMAD.WIDE R6, R0, R7, c[0x0][0x170] ; /* 0x00005c0000067625 */
/* 0x000fc800078e0207 */
/*00f0*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */
/* 0x004fca0000000000 */
/*0100*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*0110*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0120*/ BRA 0x120; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z7vec_addPfS_S_i
.globl _Z7vec_addPfS_S_i
.p2align 8
.type _Z7vec_addPfS_S_i,@function
_Z7vec_addPfS_S_i:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x20
s_load_b32 s3, s[0:1], 0x2c
s_load_b32 s4, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_mul_i32 s2, s2, s15
s_and_b32 s3, s3, 0xffff
s_add_i32 s2, s2, s14
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s2, s3, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s4, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z7vec_addPfS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z7vec_addPfS_S_i, .Lfunc_end0-_Z7vec_addPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z10warmup_knlv
.globl _Z10warmup_knlv
.p2align 8
.type _Z10warmup_knlv,@function
_Z10warmup_knlv:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10warmup_knlv
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 0
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 0
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z10warmup_knlv, .Lfunc_end1-_Z10warmup_knlv
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z7vec_addPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z7vec_addPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args: []
.group_segment_fixed_size: 0
.kernarg_segment_align: 4
.kernarg_segment_size: 0
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10warmup_knlv
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z10warmup_knlv.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000ed302_00000000-6_vec_add1.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3674:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3674:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z12vec_add_hostPfS_S_i
.type _Z12vec_add_hostPfS_S_i, @function
_Z12vec_add_hostPfS_S_i:
.LFB3671:
.cfi_startproc
endbr64
testl %ecx, %ecx
jle .L3
movslq %ecx, %rcx
salq $2, %rcx
movl $0, %eax
.L5:
movss (%rdi,%rax), %xmm0
addss (%rsi,%rax), %xmm0
movss %xmm0, (%rdx,%rax)
addq $4, %rax
cmpq %rcx, %rax
jne .L5
.L3:
ret
.cfi_endproc
.LFE3671:
.size _Z12vec_add_hostPfS_S_i, .-_Z12vec_add_hostPfS_S_i
.globl _Z31__device_stub__Z7vec_addPfS_S_iPfS_S_i
.type _Z31__device_stub__Z7vec_addPfS_S_iPfS_S_i, @function
_Z31__device_stub__Z7vec_addPfS_S_iPfS_S_i:
.LFB3696:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z7vec_addPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3696:
.size _Z31__device_stub__Z7vec_addPfS_S_iPfS_S_i, .-_Z31__device_stub__Z7vec_addPfS_S_iPfS_S_i
.globl _Z7vec_addPfS_S_i
.type _Z7vec_addPfS_S_i, @function
_Z7vec_addPfS_S_i:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z7vec_addPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _Z7vec_addPfS_S_i, .-_Z7vec_addPfS_S_i
.globl _Z29__device_stub__Z10warmup_knlvv
.type _Z29__device_stub__Z10warmup_knlvv, @function
_Z29__device_stub__Z10warmup_knlvv:
.LFB3698:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L19
.L15:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L20
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z10warmup_knlv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L15
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3698:
.size _Z29__device_stub__Z10warmup_knlvv, .-_Z29__device_stub__Z10warmup_knlvv
.globl _Z10warmup_knlv
.type _Z10warmup_knlv, @function
_Z10warmup_knlv:
.LFB3699:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z10warmup_knlvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3699:
.size _Z10warmup_knlv, .-_Z10warmup_knlv
.globl _Z6warmupv
.type _Z6warmupv, @function
_Z6warmupv:
.LFB3670:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $32, %rsp
.cfi_def_cfa_offset 48
movl $8, %ebx
jmp .L25
.L24:
subl $1, %ebx
je .L28
.L25:
movl $256, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L24
call _Z29__device_stub__Z10warmup_knlvv
jmp .L24
.L28:
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3670:
.size _Z6warmupv, .-_Z6warmupv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "["
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "/home/ubuntu/Datasets/stackv2/train-structured/wucng/Study/master/cuda/example/vec_add1.cu"
.section .rodata.str1.1
.LC2:
.string ":"
.LC3:
.string "] "
.LC4:
.string "GPU \345\256\236\347\216\260\345\220\221\351\207\217\347\232\204\345\212\240\346\263\225"
.LC5:
.string "couldn't allocate CPU memory"
.LC6:
.string "couldn't allocate GPU memory"
.LC8:
.string "allocated %.2f MB on GPU\n"
.LC9:
.string "GPU cost time="
.LC11:
.string "s\t"
.LC12:
.string "last_num="
.LC13:
.string "CPU cost time="
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $64, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC1(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC2(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $67, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC3(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC4(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $280, 32(%rsp)
movl $280, 36(%rsp)
movl $1, 40(%rsp)
movq $0, 8(%rsp)
movq $0, 16(%rsp)
movq $0, 24(%rsp)
movl $80000000, %edi
call malloc@PLT
movq %rax, %rbp
movl $80000000, %edi
call malloc@PLT
movq %rax, %rbx
movl $80000000, %edi
call malloc@PLT
movq %rax, %r13
testq %rbp, %rbp
sete %al
testq %rbx, %rbx
sete %dl
orb %dl, %al
jne .L40
testq %r13, %r13
je .L40
movl $0, %eax
.L30:
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, 0(%rbp,%rax,4)
movss %xmm0, (%rbx,%rax,4)
addq $1, %rax
cmpq $20000000, %rax
jne .L30
call _Z6warmupv
leaq 8(%rsp), %rdi
movl $80000000, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $80000000, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $80000000, %esi
call cudaMalloc@PLT
cmpq $0, 8(%rsp)
je .L33
cmpq $0, 16(%rsp)
je .L33
cmpq $0, 24(%rsp)
je .L33
movsd .LC7(%rip), %xmm0
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $1, %ecx
movl $80000000, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $80000000, %edx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
call _Z6warmupv
call cudaDeviceSynchronize@PLT
call clock@PLT
movq %rax, %r14
movl $30, %r12d
jmp .L36
.L40:
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC1(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC2(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $97, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC3(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC5(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $-2, %eax
jmp .L29
.L33:
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC1(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC2(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $121, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC3(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC6(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $-1, %eax
jmp .L29
.L35:
subl $1, %r12d
je .L45
.L36:
movl $256, 44(%rsp)
movl $1, 48(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl 40(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L35
movl $20000000, %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z31__device_stub__Z7vec_addPfS_S_iPfS_S_i
jmp .L35
.L45:
call cudaDeviceSynchronize@PLT
movl $2, %ecx
movl $80000000, %edx
movq 24(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
leaq .LC9(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %r12
call clock@PLT
subq %r14, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
divsd .LC10(%rip), %xmm0
movq %r12, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
leaq .LC11(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC12(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
pxor %xmm0, %xmm0
cvtss2sd 79999996(%r13), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
call clock@PLT
movq %rax, %r14
movl $30, %r12d
.L37:
movl $20000000, %ecx
movq %r13, %rdx
movq %rbx, %rsi
movq %rbp, %rdi
call _Z12vec_add_hostPfS_S_i
subl $1, %r12d
jne .L37
leaq .LC13(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %r12
call clock@PLT
subq %r14, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
divsd .LC10(%rip), %xmm0
movq %r12, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
leaq .LC11(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC12(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
pxor %xmm0, %xmm0
cvtss2sd 79999996(%r13), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movl $0, %eax
.L29:
movq 56(%rsp), %rdx
subq %fs:40, %rdx
jne .L46
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L46:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1
.LC14:
.string "_Z10warmup_knlv"
.LC15:
.string "_Z7vec_addPfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3701:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC14(%rip), %rdx
movq %rdx, %rcx
leaq _Z10warmup_knlv(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _Z7vec_addPfS_S_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3701:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC7:
.long 0
.long 1079186128
.align 8
.LC10:
.long 0
.long 1093567616
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "vec_add1.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z22__device_stub__vec_addPfS_S_i # -- Begin function _Z22__device_stub__vec_addPfS_S_i
.p2align 4, 0x90
.type _Z22__device_stub__vec_addPfS_S_i,@function
_Z22__device_stub__vec_addPfS_S_i: # @_Z22__device_stub__vec_addPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z7vec_addPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z22__device_stub__vec_addPfS_S_i, .Lfunc_end0-_Z22__device_stub__vec_addPfS_S_i
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI1_0:
.quad 0x405312d000000000 # double 76.2939453125
.LCPI1_1:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $152, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $1, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $101, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl $.L.str.2, %esi
movl $1, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl $67, %esi
callq _ZNSolsEi
movq %rax, %rbx
movl $.L.str.3, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $.L.str.4, %esi
movl $25, %edx
movq %rbx, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq (%rbx), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %r14
testq %r14, %r14
je .LBB1_40
# %bb.1: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%r14)
je .LBB1_3
# %bb.2:
movzbl 67(%r14), %eax
jmp .LBB1_4
.LBB1_3:
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB1_4: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movq %rbx, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq $0, 16(%rsp)
movq $0, 8(%rsp)
movq $0, (%rsp)
movl $80000000, %edi # imm = 0x4C4B400
callq malloc
movq %rax, %rbx
movl $80000000, %edi # imm = 0x4C4B400
callq malloc
movq %rax, %r14
movl $80000000, %edi # imm = 0x4C4B400
callq malloc
testq %rbx, %rbx
je .LBB1_15
# %bb.5: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
testq %r14, %r14
je .LBB1_15
# %bb.6: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movq %rax, %r15
testq %rax, %rax
je .LBB1_15
# %bb.7: # %.preheader.preheader
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_8: # %.preheader
# =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%rbx,%rax,4)
movss %xmm0, (%r14,%rax,4)
incq %rax
cmpq $20000000, %rax # imm = 0x1312D00
jne .LBB1_8
# %bb.9:
callq _Z6warmupv
leaq 16(%rsp), %rdi
movl $80000000, %esi # imm = 0x4C4B400
callq hipMalloc
leaq 8(%rsp), %rdi
movl $80000000, %esi # imm = 0x4C4B400
callq hipMalloc
movq %rsp, %rdi
movl $80000000, %esi # imm = 0x4C4B400
callq hipMalloc
cmpq $0, 16(%rsp)
je .LBB1_12
# %bb.10:
cmpq $0, 8(%rsp)
je .LBB1_12
# %bb.11:
cmpq $0, (%rsp)
je .LBB1_12
# %bb.22:
movsd .LCPI1_0(%rip), %xmm0 # xmm0 = mem[0],zero
movl $.L.str.7, %edi
movb $1, %al
callq printf
movq 16(%rsp), %rdi
movl $80000000, %edx # imm = 0x4C4B400
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movl $80000000, %edx # imm = 0x4C4B400
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
callq _Z6warmupv
callq hipDeviceSynchronize
movl $30, %r12d
callq clock
movq %rax, 32(%rsp) # 8-byte Spill
movabsq $1202590843160, %r13 # imm = 0x11800000118
movabsq $4294967552, %rbp # imm = 0x100000100
jmp .LBB1_23
.p2align 4, 0x90
.LBB1_25: # in Loop: Header=BB1_23 Depth=1
decl %r12d
je .LBB1_26
.LBB1_23: # =>This Inner Loop Header: Depth=1
movq %r13, %rdi
movl $1, %esi
movq %rbp, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_25
# %bb.24: # in Loop: Header=BB1_23 Depth=1
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl $20000000, 28(%rsp) # imm = 0x1312D00
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 28(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
movl $_Z7vec_addPfS_S_i, %edi
leaq 112(%rsp), %r9
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB1_25
.LBB1_15:
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $1, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $101, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl $.L.str.2, %esi
movl $1, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl $97, %esi
callq _ZNSolsEi
movq %rax, %rbx
movl $.L.str.3, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $.L.str.5, %esi
movl $28, %edx
movq %rbx, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq (%rbx), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %r14
testq %r14, %r14
je .LBB1_40
# %bb.16: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i68
cmpb $0, 56(%r14)
je .LBB1_18
# %bb.17:
movzbl 67(%r14), %eax
jmp .LBB1_19
.LBB1_12:
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $1, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $101, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl $.L.str.2, %esi
movl $1, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl $121, %esi
callq _ZNSolsEi
movq %rax, %rbx
movl $.L.str.3, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $.L.str.6, %esi
movl $28, %edx
movq %rbx, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq (%rbx), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %r14
testq %r14, %r14
je .LBB1_40
# %bb.13: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i73
cmpb $0, 56(%r14)
je .LBB1_20
# %bb.14:
movzbl 67(%r14), %eax
jmp .LBB1_21
.LBB1_26:
callq hipDeviceSynchronize
movq (%rsp), %rsi
movl $80000000, %edx # imm = 0x4C4B400
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
movl $_ZSt4cout, %edi
movl $.L.str.8, %esi
movl $14, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
callq clock
subq 32(%rsp), %rax # 8-byte Folded Reload
xorps %xmm0, %xmm0
cvtsi2sd %rax, %xmm0
divsd .LCPI1_1(%rip), %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq %rax, %r12
movl $.L.str.9, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $.L.str.10, %esi
movl $9, %edx
movq %r12, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movss 79999996(%r15), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movq %r12, %rdi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r12
testq %r12, %r12
je .LBB1_40
# %bb.27: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i78
cmpb $0, 56(%r12)
je .LBB1_29
# %bb.28:
movzbl 67(%r12), %ecx
jmp .LBB1_30
.LBB1_18:
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB1_19: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit71
movsbl %al, %esi
movq %rbx, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $-2, %eax
jmp .LBB1_39
.LBB1_20:
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB1_21: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit76
movsbl %al, %esi
movq %rbx, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $-1, %eax
jmp .LBB1_39
.LBB1_29:
movq %r12, %rdi
movq %rax, %r13
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r13, %rax
.LBB1_30: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit81
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
xorl %r13d, %r13d
callq clock
movq %rax, %r12
.p2align 4, 0x90
.LBB1_31: # %.lr.ph.i.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_32 Depth 2
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_32: # %.lr.ph.i
# Parent Loop BB1_31 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rbx,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
addss (%r14,%rax,4), %xmm0
movss %xmm0, (%r15,%rax,4)
incq %rax
cmpq $20000000, %rax # imm = 0x1312D00
jne .LBB1_32
# %bb.33: # %_Z12vec_add_hostPfS_S_i.exit
# in Loop: Header=BB1_31 Depth=1
incl %r13d
cmpl $30, %r13d
jne .LBB1_31
# %bb.34:
movl $_ZSt4cout, %edi
movl $.L.str.11, %esi
movl $14, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
callq clock
subq %r12, %rax
xorps %xmm0, %xmm0
cvtsi2sd %rax, %xmm0
divsd .LCPI1_1(%rip), %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq %rax, %r12
movl $.L.str.9, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $.L.str.10, %esi
movl $9, %edx
movq %r12, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movss 79999996(%r15), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movq %r12, %rdi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r12
testq %r12, %r12
je .LBB1_40
# %bb.35: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i83
cmpb $0, 56(%r12)
je .LBB1_37
# %bb.36:
movzbl 67(%r12), %ecx
jmp .LBB1_38
.LBB1_37:
movq %r12, %rdi
movq %rax, %r13
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r13, %rax
.LBB1_38: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit86
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
xorl %eax, %eax
.LBB1_39:
addq $152, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_40:
.cfi_def_cfa_offset 208
callq _ZSt16__throw_bad_castv
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.globl _Z6warmupv # -- Begin function _Z6warmupv
.p2align 4, 0x90
.type _Z6warmupv,@function
_Z6warmupv: # @_Z6warmupv
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $56, %rsp
.cfi_def_cfa_offset 112
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movabsq $4294967297, %rbx # imm = 0x100000001
movl $8, %r12d
leaq 255(%rbx), %r14
leaq 8(%rsp), %r13
movq %rsp, %rbp
leaq 48(%rsp), %r15
jmp .LBB2_1
.p2align 4, 0x90
.LBB2_3: # in Loop: Header=BB2_1 Depth=1
decl %r12d
je .LBB2_4
.LBB2_1: # =>This Inner Loop Header: Depth=1
movq %rbx, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_3
# %bb.2: # in Loop: Header=BB2_1 Depth=1
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
movq %r13, %rdx
movq %rbp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
movl $_Z10warmup_knlv, %edi
movq %r15, %r9
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB2_3
.LBB2_4:
addq $56, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z6warmupv, .Lfunc_end2-_Z6warmupv
.cfi_endproc
# -- End function
.globl _Z12vec_add_hostPfS_S_i # -- Begin function _Z12vec_add_hostPfS_S_i
.p2align 4, 0x90
.type _Z12vec_add_hostPfS_S_i,@function
_Z12vec_add_hostPfS_S_i: # @_Z12vec_add_hostPfS_S_i
.cfi_startproc
# %bb.0:
testl %ecx, %ecx
jle .LBB3_3
# %bb.1: # %.lr.ph.preheader
movl %ecx, %eax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB3_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movss (%rdi,%rcx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
addss (%rsi,%rcx,4), %xmm0
movss %xmm0, (%rdx,%rcx,4)
incq %rcx
cmpq %rcx, %rax
jne .LBB3_2
.LBB3_3: # %._crit_edge
retq
.Lfunc_end3:
.size _Z12vec_add_hostPfS_S_i, .Lfunc_end3-_Z12vec_add_hostPfS_S_i
.cfi_endproc
# -- End function
.globl _Z25__device_stub__warmup_knlv # -- Begin function _Z25__device_stub__warmup_knlv
.p2align 4, 0x90
.type _Z25__device_stub__warmup_knlv,@function
_Z25__device_stub__warmup_knlv: # @_Z25__device_stub__warmup_knlv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z10warmup_knlv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end4:
.size _Z25__device_stub__warmup_knlv, .Lfunc_end4-_Z25__device_stub__warmup_knlv
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB5_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB5_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7vec_addPfS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10warmup_knlv, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end5:
.size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB6_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB6_2:
retq
.Lfunc_end6:
.size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z7vec_addPfS_S_i,@object # @_Z7vec_addPfS_S_i
.section .rodata,"a",@progbits
.globl _Z7vec_addPfS_S_i
.p2align 3, 0x0
_Z7vec_addPfS_S_i:
.quad _Z22__device_stub__vec_addPfS_S_i
.size _Z7vec_addPfS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "["
.size .L.str, 2
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/wucng/Study/master/cuda/example/vec_add1.hip"
.size .L.str.1, 102
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz ":"
.size .L.str.2, 2
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "] "
.size .L.str.3, 3
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "GPU \345\256\236\347\216\260\345\220\221\351\207\217\347\232\204\345\212\240\346\263\225"
.size .L.str.4, 26
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "couldn't allocate CPU memory"
.size .L.str.5, 29
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "couldn't allocate GPU memory"
.size .L.str.6, 29
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "allocated %.2f MB on GPU\n"
.size .L.str.7, 26
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "GPU cost time="
.size .L.str.8, 15
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "s\t"
.size .L.str.9, 3
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "last_num="
.size .L.str.10, 10
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "CPU cost time="
.size .L.str.11, 15
.type _Z10warmup_knlv,@object # @_Z10warmup_knlv
.section .rodata,"a",@progbits
.globl _Z10warmup_knlv
.p2align 3, 0x0
_Z10warmup_knlv:
.quad _Z25__device_stub__warmup_knlv
.size _Z10warmup_knlv, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z7vec_addPfS_S_i"
.size .L__unnamed_1, 18
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z10warmup_knlv"
.size .L__unnamed_2, 16
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__vec_addPfS_S_i
.addrsig_sym _Z25__device_stub__warmup_knlv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z7vec_addPfS_S_i
.addrsig_sym _ZSt4cout
.addrsig_sym _Z10warmup_knlv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void _reluback(int n, float *y, float *dy) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
if (y[i] <= 0) dy[i] = 0;
i += blockDim.x * gridDim.x;
}
} | code for sm_80
Function : _Z9_relubackiPfS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x160], PT ; /* 0x0000580000007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0070*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fc800078e00ff */
/*0080*/ IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fcc00078e0203 */
/*0090*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ SHF.R.S32.HI R5, RZ, 0x1f, R0 ; /* 0x0000001fff057819 */
/* 0x000fe20000011400 */
/*00b0*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff077624 */
/* 0x000fe200078e00ff */
/*00c0*/ FSETP.GTU.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720b */
/* 0x004fda0003f0c000 */
/*00d0*/ @!P0 LEA R4, P1, R0, c[0x0][0x170], 0x2 ; /* 0x00005c0000048a11 */
/* 0x000fc800078210ff */
/*00e0*/ @!P0 LEA.HI.X R5, R0, c[0x0][0x174], R5, 0x2, P1 ; /* 0x00005d0000058a11 */
/* 0x000fe200008f1405 */
/*00f0*/ IMAD R0, R7, c[0x0][0xc], R0 ; /* 0x0000030007007a24 */
/* 0x000fc800078e0200 */
/*0100*/ @!P0 STG.E [R4.64], RZ ; /* 0x000000ff04008986 */
/* 0x0001e2000c101904 */
/*0110*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x160], PT ; /* 0x0000580000007a0c */
/* 0x000fda0003f06270 */
/*0120*/ @!P0 BRA 0x70 ; /* 0xffffff4000008947 */
/* 0x001fea000383ffff */
/*0130*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0140*/ BRA 0x140; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void _reluback(int n, float *y, float *dy) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
if (y[i] <= 0) dy[i] = 0;
i += blockDim.x * gridDim.x;
}
} | .file "tmpxft_0009a51c_00000000-6__reluback.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z31__device_stub__Z9_relubackiPfS_iPfS_
.type _Z31__device_stub__Z9_relubackiPfS_iPfS_, @function
_Z31__device_stub__Z9_relubackiPfS_iPfS_:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9_relubackiPfS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z31__device_stub__Z9_relubackiPfS_iPfS_, .-_Z31__device_stub__Z9_relubackiPfS_iPfS_
.globl _Z9_relubackiPfS_
.type _Z9_relubackiPfS_, @function
_Z9_relubackiPfS_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z9_relubackiPfS_iPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z9_relubackiPfS_, .-_Z9_relubackiPfS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z9_relubackiPfS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z9_relubackiPfS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void _reluback(int n, float *y, float *dy) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
if (y[i] <= 0) dy[i] = 0;
i += blockDim.x * gridDim.x;
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void _reluback(int n, float *y, float *dy) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
if (y[i] <= 0) dy[i] = 0;
i += blockDim.x * gridDim.x;
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void _reluback(int n, float *y, float *dy) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
if (y[i] <= 0) dy[i] = 0;
i += blockDim.x * gridDim.x;
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9_relubackiPfS_
.globl _Z9_relubackiPfS_
.p2align 8
.type _Z9_relubackiPfS_,@function
_Z9_relubackiPfS_:
s_clause 0x1
s_load_b32 s5, s[0:1], 0x24
s_load_b32 s4, s[0:1], 0x0
s_add_u32 s2, s0, 24
s_addc_u32 s3, s1, 0
s_mov_b32 s6, exec_lo
s_waitcnt lgkmcnt(0)
s_and_b32 s5, s5, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s5, v[0:1]
v_cmpx_gt_i32_e64 s4, v1
s_cbranch_execz .LBB0_5
s_load_b32 s6, s[2:3], 0x0
s_load_b128 s[0:3], s[0:1], 0x8
v_mov_b32_e32 v0, 0
s_waitcnt lgkmcnt(0)
s_mul_i32 s5, s6, s5
s_mov_b32 s6, 0
s_branch .LBB0_3
.p2align 6
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s7
v_add_nc_u32_e32 v1, s5, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_le_i32_e32 vcc_lo, s4, v1
s_or_b32 s6, vcc_lo, s6
s_and_not1_b32 exec_lo, exec_lo, s6
s_cbranch_execz .LBB0_5
.LBB0_3:
v_ashrrev_i32_e32 v2, 31, v1
s_mov_b32 s7, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_add_co_u32 v4, vcc_lo, s0, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v3, vcc_lo
global_load_b32 v4, v[4:5], off
s_waitcnt vmcnt(0)
v_cmpx_ge_f32_e32 0, v4
s_cbranch_execz .LBB0_2
v_add_co_u32 v2, vcc_lo, s2, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
global_store_b32 v[2:3], v0, off
s_branch .LBB0_2
.LBB0_5:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9_relubackiPfS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9_relubackiPfS_, .Lfunc_end0-_Z9_relubackiPfS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9_relubackiPfS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9_relubackiPfS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void _reluback(int n, float *y, float *dy) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
if (y[i] <= 0) dy[i] = 0;
i += blockDim.x * gridDim.x;
}
} | .text
.file "_reluback.hip"
.globl _Z24__device_stub___relubackiPfS_ # -- Begin function _Z24__device_stub___relubackiPfS_
.p2align 4, 0x90
.type _Z24__device_stub___relubackiPfS_,@function
_Z24__device_stub___relubackiPfS_: # @_Z24__device_stub___relubackiPfS_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movl %edi, 12(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9_relubackiPfS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z24__device_stub___relubackiPfS_, .Lfunc_end0-_Z24__device_stub___relubackiPfS_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9_relubackiPfS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9_relubackiPfS_,@object # @_Z9_relubackiPfS_
.section .rodata,"a",@progbits
.globl _Z9_relubackiPfS_
.p2align 3, 0x0
_Z9_relubackiPfS_:
.quad _Z24__device_stub___relubackiPfS_
.size _Z9_relubackiPfS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9_relubackiPfS_"
.size .L__unnamed_1, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub___relubackiPfS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9_relubackiPfS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z9_relubackiPfS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x160], PT ; /* 0x0000580000007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0070*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fc800078e00ff */
/*0080*/ IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fcc00078e0203 */
/*0090*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ SHF.R.S32.HI R5, RZ, 0x1f, R0 ; /* 0x0000001fff057819 */
/* 0x000fe20000011400 */
/*00b0*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff077624 */
/* 0x000fe200078e00ff */
/*00c0*/ FSETP.GTU.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720b */
/* 0x004fda0003f0c000 */
/*00d0*/ @!P0 LEA R4, P1, R0, c[0x0][0x170], 0x2 ; /* 0x00005c0000048a11 */
/* 0x000fc800078210ff */
/*00e0*/ @!P0 LEA.HI.X R5, R0, c[0x0][0x174], R5, 0x2, P1 ; /* 0x00005d0000058a11 */
/* 0x000fe200008f1405 */
/*00f0*/ IMAD R0, R7, c[0x0][0xc], R0 ; /* 0x0000030007007a24 */
/* 0x000fc800078e0200 */
/*0100*/ @!P0 STG.E [R4.64], RZ ; /* 0x000000ff04008986 */
/* 0x0001e2000c101904 */
/*0110*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x160], PT ; /* 0x0000580000007a0c */
/* 0x000fda0003f06270 */
/*0120*/ @!P0 BRA 0x70 ; /* 0xffffff4000008947 */
/* 0x001fea000383ffff */
/*0130*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0140*/ BRA 0x140; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9_relubackiPfS_
.globl _Z9_relubackiPfS_
.p2align 8
.type _Z9_relubackiPfS_,@function
_Z9_relubackiPfS_:
s_clause 0x1
s_load_b32 s5, s[0:1], 0x24
s_load_b32 s4, s[0:1], 0x0
s_add_u32 s2, s0, 24
s_addc_u32 s3, s1, 0
s_mov_b32 s6, exec_lo
s_waitcnt lgkmcnt(0)
s_and_b32 s5, s5, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s5, v[0:1]
v_cmpx_gt_i32_e64 s4, v1
s_cbranch_execz .LBB0_5
s_load_b32 s6, s[2:3], 0x0
s_load_b128 s[0:3], s[0:1], 0x8
v_mov_b32_e32 v0, 0
s_waitcnt lgkmcnt(0)
s_mul_i32 s5, s6, s5
s_mov_b32 s6, 0
s_branch .LBB0_3
.p2align 6
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s7
v_add_nc_u32_e32 v1, s5, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_le_i32_e32 vcc_lo, s4, v1
s_or_b32 s6, vcc_lo, s6
s_and_not1_b32 exec_lo, exec_lo, s6
s_cbranch_execz .LBB0_5
.LBB0_3:
v_ashrrev_i32_e32 v2, 31, v1
s_mov_b32 s7, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_add_co_u32 v4, vcc_lo, s0, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v3, vcc_lo
global_load_b32 v4, v[4:5], off
s_waitcnt vmcnt(0)
v_cmpx_ge_f32_e32 0, v4
s_cbranch_execz .LBB0_2
v_add_co_u32 v2, vcc_lo, s2, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
global_store_b32 v[2:3], v0, off
s_branch .LBB0_2
.LBB0_5:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9_relubackiPfS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9_relubackiPfS_, .Lfunc_end0-_Z9_relubackiPfS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9_relubackiPfS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9_relubackiPfS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0009a51c_00000000-6__reluback.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z31__device_stub__Z9_relubackiPfS_iPfS_
.type _Z31__device_stub__Z9_relubackiPfS_iPfS_, @function
_Z31__device_stub__Z9_relubackiPfS_iPfS_:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9_relubackiPfS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z31__device_stub__Z9_relubackiPfS_iPfS_, .-_Z31__device_stub__Z9_relubackiPfS_iPfS_
.globl _Z9_relubackiPfS_
.type _Z9_relubackiPfS_, @function
_Z9_relubackiPfS_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z9_relubackiPfS_iPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z9_relubackiPfS_, .-_Z9_relubackiPfS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z9_relubackiPfS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z9_relubackiPfS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "_reluback.hip"
.globl _Z24__device_stub___relubackiPfS_ # -- Begin function _Z24__device_stub___relubackiPfS_
.p2align 4, 0x90
.type _Z24__device_stub___relubackiPfS_,@function
_Z24__device_stub___relubackiPfS_: # @_Z24__device_stub___relubackiPfS_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movl %edi, 12(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9_relubackiPfS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z24__device_stub___relubackiPfS_, .Lfunc_end0-_Z24__device_stub___relubackiPfS_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9_relubackiPfS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9_relubackiPfS_,@object # @_Z9_relubackiPfS_
.section .rodata,"a",@progbits
.globl _Z9_relubackiPfS_
.p2align 3, 0x0
_Z9_relubackiPfS_:
.quad _Z24__device_stub___relubackiPfS_
.size _Z9_relubackiPfS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9_relubackiPfS_"
.size .L__unnamed_1, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub___relubackiPfS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9_relubackiPfS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void even(int *darr, int n) {
int k = threadIdx.x;
int t;
k = k * 2;
if (k <= n - 2) {
if (darr[k] > darr[k + 1]) {
t = darr[k];
darr[k] = darr[k + 1];
darr[k + 1] = t;
}
}
} | code for sm_80
Function : _Z4evenPii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000e220000002100 */
/*0020*/ ULDC UR4, c[0x0][0x168] ; /* 0x00005a0000047ab9 */
/* 0x000fe40000000800 */
/*0030*/ UIADD3 UR4, UR4, -0x2, URZ ; /* 0xfffffffe04047890 */
/* 0x000fe2000fffe03f */
/*0040*/ SHF.L.U32 R2, R2, 0x1, RZ ; /* 0x0000000102027819 */
/* 0x001fca00000006ff */
/*0050*/ ISETP.GT.AND P0, PT, R2, UR4, PT ; /* 0x0000000402007c0c */
/* 0x000fda000bf04270 */
/*0060*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0070*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0090*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fca00078e0203 */
/*00a0*/ LDG.E R5, [R2.64+0x4] ; /* 0x0000040402057981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea4000c1e1900 */
/*00c0*/ ISETP.GT.AND P0, PT, R0, R5, PT ; /* 0x000000050000720c */
/* 0x004fda0003f04270 */
/*00d0*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*00e0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe8000c101904 */
/*00f0*/ STG.E [R2.64+0x4], R0 ; /* 0x0000040002007986 */
/* 0x000fe2000c101904 */
/*0100*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0110*/ BRA 0x110; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void even(int *darr, int n) {
int k = threadIdx.x;
int t;
k = k * 2;
if (k <= n - 2) {
if (darr[k] > darr[k + 1]) {
t = darr[k];
darr[k] = darr[k + 1];
darr[k + 1] = t;
}
}
} | .file "tmpxft_00051893_00000000-6_even.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z24__device_stub__Z4evenPiiPii
.type _Z24__device_stub__Z4evenPiiPii, @function
_Z24__device_stub__Z4evenPiiPii:
.LFB2051:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z4evenPii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z24__device_stub__Z4evenPiiPii, .-_Z24__device_stub__Z4evenPiiPii
.globl _Z4evenPii
.type _Z4evenPii, @function
_Z4evenPii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z24__device_stub__Z4evenPiiPii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z4evenPii, .-_Z4evenPii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z4evenPii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z4evenPii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void even(int *darr, int n) {
int k = threadIdx.x;
int t;
k = k * 2;
if (k <= n - 2) {
if (darr[k] > darr[k + 1]) {
t = darr[k];
darr[k] = darr[k + 1];
darr[k + 1] = t;
}
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void even(int *darr, int n) {
int k = threadIdx.x;
int t;
k = k * 2;
if (k <= n - 2) {
if (darr[k] > darr[k + 1]) {
t = darr[k];
darr[k] = darr[k + 1];
darr[k + 1] = t;
}
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void even(int *darr, int n) {
int k = threadIdx.x;
int t;
k = k * 2;
if (k <= n - 2) {
if (darr[k] > darr[k + 1]) {
t = darr[k];
darr[k] = darr[k + 1];
darr[k + 1] = t;
}
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z4evenPii
.globl _Z4evenPii
.p2align 8
.type _Z4evenPii,@function
_Z4evenPii:
s_load_b32 s2, s[0:1], 0x8
v_lshlrev_b32_e32 v0, 1, v0
s_waitcnt lgkmcnt(0)
s_add_i32 s2, s2, -2
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_cmp_ge_i32_e32 vcc_lo, s2, v0
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_3
s_load_b64 s[0:1], s[0:1], 0x0
v_lshlrev_b32_e32 v3, 2, v0
s_delay_alu instid0(VALU_DEP_1)
v_or_b32_e32 v2, 4, v3
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b32 v0, v3, s[0:1]
global_load_b32 v1, v2, s[0:1]
s_waitcnt vmcnt(0)
v_cmp_gt_i32_e32 vcc_lo, v0, v1
s_and_b32 exec_lo, exec_lo, vcc_lo
v_add_co_u32 v3, s2, s0, v3
v_add_co_u32 v5, s0, s0, v2
v_add_co_ci_u32_e64 v4, null, s1, 0, s2
v_add_co_ci_u32_e64 v6, null, s1, 0, s0
s_clause 0x1
global_store_b32 v[3:4], v1, off
global_store_b32 v[5:6], v0, off
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4evenPii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 12
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 3
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z4evenPii, .Lfunc_end0-_Z4evenPii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 12
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4evenPii
.private_segment_fixed_size: 0
.sgpr_count: 5
.sgpr_spill_count: 0
.symbol: _Z4evenPii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void even(int *darr, int n) {
int k = threadIdx.x;
int t;
k = k * 2;
if (k <= n - 2) {
if (darr[k] > darr[k + 1]) {
t = darr[k];
darr[k] = darr[k + 1];
darr[k + 1] = t;
}
}
} | .text
.file "even.hip"
.globl _Z19__device_stub__evenPii # -- Begin function _Z19__device_stub__evenPii
.p2align 4, 0x90
.type _Z19__device_stub__evenPii,@function
_Z19__device_stub__evenPii: # @_Z19__device_stub__evenPii
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z4evenPii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z19__device_stub__evenPii, .Lfunc_end0-_Z19__device_stub__evenPii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4evenPii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z4evenPii,@object # @_Z4evenPii
.section .rodata,"a",@progbits
.globl _Z4evenPii
.p2align 3, 0x0
_Z4evenPii:
.quad _Z19__device_stub__evenPii
.size _Z4evenPii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z4evenPii"
.size .L__unnamed_1, 11
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z19__device_stub__evenPii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z4evenPii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z4evenPii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000e220000002100 */
/*0020*/ ULDC UR4, c[0x0][0x168] ; /* 0x00005a0000047ab9 */
/* 0x000fe40000000800 */
/*0030*/ UIADD3 UR4, UR4, -0x2, URZ ; /* 0xfffffffe04047890 */
/* 0x000fe2000fffe03f */
/*0040*/ SHF.L.U32 R2, R2, 0x1, RZ ; /* 0x0000000102027819 */
/* 0x001fca00000006ff */
/*0050*/ ISETP.GT.AND P0, PT, R2, UR4, PT ; /* 0x0000000402007c0c */
/* 0x000fda000bf04270 */
/*0060*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0070*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0090*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fca00078e0203 */
/*00a0*/ LDG.E R5, [R2.64+0x4] ; /* 0x0000040402057981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea4000c1e1900 */
/*00c0*/ ISETP.GT.AND P0, PT, R0, R5, PT ; /* 0x000000050000720c */
/* 0x004fda0003f04270 */
/*00d0*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*00e0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe8000c101904 */
/*00f0*/ STG.E [R2.64+0x4], R0 ; /* 0x0000040002007986 */
/* 0x000fe2000c101904 */
/*0100*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0110*/ BRA 0x110; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z4evenPii
.globl _Z4evenPii
.p2align 8
.type _Z4evenPii,@function
_Z4evenPii:
s_load_b32 s2, s[0:1], 0x8
v_lshlrev_b32_e32 v0, 1, v0
s_waitcnt lgkmcnt(0)
s_add_i32 s2, s2, -2
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_cmp_ge_i32_e32 vcc_lo, s2, v0
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_3
s_load_b64 s[0:1], s[0:1], 0x0
v_lshlrev_b32_e32 v3, 2, v0
s_delay_alu instid0(VALU_DEP_1)
v_or_b32_e32 v2, 4, v3
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b32 v0, v3, s[0:1]
global_load_b32 v1, v2, s[0:1]
s_waitcnt vmcnt(0)
v_cmp_gt_i32_e32 vcc_lo, v0, v1
s_and_b32 exec_lo, exec_lo, vcc_lo
v_add_co_u32 v3, s2, s0, v3
v_add_co_u32 v5, s0, s0, v2
v_add_co_ci_u32_e64 v4, null, s1, 0, s2
v_add_co_ci_u32_e64 v6, null, s1, 0, s0
s_clause 0x1
global_store_b32 v[3:4], v1, off
global_store_b32 v[5:6], v0, off
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4evenPii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 12
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 3
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z4evenPii, .Lfunc_end0-_Z4evenPii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 12
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4evenPii
.private_segment_fixed_size: 0
.sgpr_count: 5
.sgpr_spill_count: 0
.symbol: _Z4evenPii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00051893_00000000-6_even.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z24__device_stub__Z4evenPiiPii
.type _Z24__device_stub__Z4evenPiiPii, @function
_Z24__device_stub__Z4evenPiiPii:
.LFB2051:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z4evenPii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z24__device_stub__Z4evenPiiPii, .-_Z24__device_stub__Z4evenPiiPii
.globl _Z4evenPii
.type _Z4evenPii, @function
_Z4evenPii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z24__device_stub__Z4evenPiiPii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z4evenPii, .-_Z4evenPii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z4evenPii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z4evenPii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "even.hip"
.globl _Z19__device_stub__evenPii # -- Begin function _Z19__device_stub__evenPii
.p2align 4, 0x90
.type _Z19__device_stub__evenPii,@function
_Z19__device_stub__evenPii: # @_Z19__device_stub__evenPii
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z4evenPii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z19__device_stub__evenPii, .Lfunc_end0-_Z19__device_stub__evenPii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4evenPii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z4evenPii,@object # @_Z4evenPii
.section .rodata,"a",@progbits
.globl _Z4evenPii
.p2align 3, 0x0
_Z4evenPii:
.quad _Z19__device_stub__evenPii
.size _Z4evenPii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z4evenPii"
.size .L__unnamed_1, 11
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z19__device_stub__evenPii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z4evenPii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void matrixClip(double *a, double min, double max, double *c, int cr, int cc){
int x = blockIdx.x * blockDim.x + threadIdx.x; // col
int y = blockIdx.y * blockDim.y + threadIdx.y; // row
if(x < cc && y < cr){
if(a[y * cc + x] > max){
c[y * cc + x] = max;
}else{
if(a[y * cc + x] < min){
c[y * cc + x] = min;
}else{
c[y * cc + x] = a[y * cc + x];
}
}
}
} | code for sm_80
Function : _Z10matrixClipPdddS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e280000002100 */
/*0030*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */
/* 0x000e680000002600 */
/*0040*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e620000002200 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x184], PT ; /* 0x0000610000007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R3, R2, c[0x0][0x4], R5 ; /* 0x0000010002037a24 */
/* 0x002fca00078e0205 */
/*0080*/ ISETP.GE.OR P0, PT, R3, c[0x0][0x180], P0 ; /* 0x0000600003007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ HFMA2.MMA R5, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff057435 */
/* 0x000fe200000001ff */
/*00b0*/ IMAD R0, R3, c[0x0][0x184], R0 ; /* 0x0000610003007a24 */
/* 0x000fe200078e0200 */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd00000000a00 */
/*00d0*/ IMAD.WIDE R2, R0, R5, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fcc00078e0205 */
/*00e0*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1b00 */
/*00f0*/ IMAD.WIDE R4, R0, R5, c[0x0][0x178] ; /* 0x00005e0000047625 */
/* 0x000fe200078e0205 */
/*0100*/ DSETP.GT.AND P0, PT, R2, c[0x0][0x170], PT ; /* 0x00005c000200762a */
/* 0x004e1c0003f04000 */
/*0110*/ @P0 BRA 0x190 ; /* 0x0000007000000947 */
/* 0x001fea0003800000 */
/*0120*/ DSETP.GEU.AND P0, PT, R2, c[0x0][0x168], PT ; /* 0x00005a000200762a */
/* 0x000e1c0003f0e000 */
/*0130*/ @P0 STG.E.64 [R4.64], R2 ; /* 0x0000000204000986 */
/* 0x0011e2000c101b04 */
/*0140*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0150*/ MOV R2, c[0x0][0x168] ; /* 0x00005a0000027a02 */
/* 0x001fe40000000f00 */
/*0160*/ MOV R3, c[0x0][0x16c] ; /* 0x00005b0000037a02 */
/* 0x000fca0000000f00 */
/*0170*/ STG.E.64 [R4.64], R2 ; /* 0x0000000204007986 */
/* 0x000fe2000c101b04 */
/*0180*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0190*/ MOV R2, c[0x0][0x170] ; /* 0x00005c0000027a02 */
/* 0x000fe40000000f00 */
/*01a0*/ MOV R3, c[0x0][0x174] ; /* 0x00005d0000037a02 */
/* 0x000fca0000000f00 */
/*01b0*/ STG.E.64 [R4.64], R2 ; /* 0x0000000204007986 */
/* 0x000fe2000c101b04 */
/*01c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01d0*/ BRA 0x1d0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void matrixClip(double *a, double min, double max, double *c, int cr, int cc){
int x = blockIdx.x * blockDim.x + threadIdx.x; // col
int y = blockIdx.y * blockDim.y + threadIdx.y; // row
if(x < cc && y < cr){
if(a[y * cc + x] > max){
c[y * cc + x] = max;
}else{
if(a[y * cc + x] < min){
c[y * cc + x] = min;
}else{
c[y * cc + x] = a[y * cc + x];
}
}
}
} | .file "tmpxft_00029bda_00000000-6_matrixClip.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z36__device_stub__Z10matrixClipPdddS_iiPdddS_ii
.type _Z36__device_stub__Z10matrixClipPdddS_iiPdddS_ii, @function
_Z36__device_stub__Z10matrixClipPdddS_iiPdddS_ii:
.LFB2051:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movsd %xmm0, 32(%rsp)
movsd %xmm1, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z10matrixClipPdddS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z36__device_stub__Z10matrixClipPdddS_iiPdddS_ii, .-_Z36__device_stub__Z10matrixClipPdddS_iiPdddS_ii
.globl _Z10matrixClipPdddS_ii
.type _Z10matrixClipPdddS_ii, @function
_Z10matrixClipPdddS_ii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z36__device_stub__Z10matrixClipPdddS_iiPdddS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z10matrixClipPdddS_ii, .-_Z10matrixClipPdddS_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z10matrixClipPdddS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z10matrixClipPdddS_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void matrixClip(double *a, double min, double max, double *c, int cr, int cc){
int x = blockIdx.x * blockDim.x + threadIdx.x; // col
int y = blockIdx.y * blockDim.y + threadIdx.y; // row
if(x < cc && y < cr){
if(a[y * cc + x] > max){
c[y * cc + x] = max;
}else{
if(a[y * cc + x] < min){
c[y * cc + x] = min;
}else{
c[y * cc + x] = a[y * cc + x];
}
}
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void matrixClip(double *a, double min, double max, double *c, int cr, int cc){
int x = blockIdx.x * blockDim.x + threadIdx.x; // col
int y = blockIdx.y * blockDim.y + threadIdx.y; // row
if(x < cc && y < cr){
if(a[y * cc + x] > max){
c[y * cc + x] = max;
}else{
if(a[y * cc + x] < min){
c[y * cc + x] = min;
}else{
c[y * cc + x] = a[y * cc + x];
}
}
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void matrixClip(double *a, double min, double max, double *c, int cr, int cc){
int x = blockIdx.x * blockDim.x + threadIdx.x; // col
int y = blockIdx.y * blockDim.y + threadIdx.y; // row
if(x < cc && y < cr){
if(a[y * cc + x] > max){
c[y * cc + x] = max;
}else{
if(a[y * cc + x] < min){
c[y * cc + x] = min;
}else{
c[y * cc + x] = a[y * cc + x];
}
}
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10matrixClipPdddS_ii
.globl _Z10matrixClipPdddS_ii
.p2align 8
.type _Z10matrixClipPdddS_ii,@function
_Z10matrixClipPdddS_ii:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x34
s_load_b64 s[2:3], s[0:1], 0x20
v_and_b32_e32 v1, 0x3ff, v0
v_bfe_u32 v0, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s5, s4, 0xffff
s_lshr_b32 s4, s4, 16
v_mad_u64_u32 v[2:3], null, s14, s5, v[1:2]
v_mad_u64_u32 v[3:4], null, s15, s4, v[0:1]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_gt_i32_e32 vcc_lo, s3, v2
v_cmp_gt_i32_e64 s2, s2, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, vcc_lo, s2
s_and_saveexec_b32 s4, s2
s_cbranch_execz .LBB0_9
s_clause 0x1
s_load_b64 s[8:9], s[0:1], 0x0
s_load_b128 s[4:7], s[0:1], 0x10
v_mad_u64_u32 v[0:1], null, v3, s3, v[2:3]
s_mov_b32 s2, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 31, v0
v_lshlrev_b64 v[4:5], 3, v[0:1]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s8, v4
v_add_co_ci_u32_e32 v3, vcc_lo, s9, v5, vcc_lo
global_load_b64 v[2:3], v[2:3], off
s_waitcnt vmcnt(0)
v_cmpx_nlt_f64_e32 s[4:5], v[2:3]
s_xor_b32 s8, exec_lo, s2
s_cbranch_execz .LBB0_7
s_load_b64 s[2:3], s[0:1], 0x8
v_add_co_u32 v0, s0, s6, v4
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v1, s0, s7, v5, s0
s_mov_b32 s0, exec_lo
s_waitcnt lgkmcnt(0)
v_cmpx_ngt_f64_e32 s[2:3], v[2:3]
s_xor_b32 s0, exec_lo, s0
s_cbranch_execz .LBB0_4
global_store_b64 v[0:1], v[2:3], off
.LBB0_4:
s_and_not1_saveexec_b32 s0, s0
s_cbranch_execz .LBB0_6
v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
global_store_b64 v[0:1], v[2:3], off
.LBB0_6:
s_or_b32 exec_lo, exec_lo, s0
.LBB0_7:
s_and_not1_saveexec_b32 s0, s8
s_cbranch_execz .LBB0_9
v_lshlrev_b64 v[0:1], 3, v[0:1]
v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v0, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
global_store_b64 v[0:1], v[2:3], off
.LBB0_9:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10matrixClipPdddS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10matrixClipPdddS_ii, .Lfunc_end0-_Z10matrixClipPdddS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 8
.value_kind: by_value
- .offset: 16
.size: 8
.value_kind: by_value
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10matrixClipPdddS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10matrixClipPdddS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void matrixClip(double *a, double min, double max, double *c, int cr, int cc){
int x = blockIdx.x * blockDim.x + threadIdx.x; // col
int y = blockIdx.y * blockDim.y + threadIdx.y; // row
if(x < cc && y < cr){
if(a[y * cc + x] > max){
c[y * cc + x] = max;
}else{
if(a[y * cc + x] < min){
c[y * cc + x] = min;
}else{
c[y * cc + x] = a[y * cc + x];
}
}
}
} | .text
.file "matrixClip.hip"
.globl _Z25__device_stub__matrixClipPdddS_ii # -- Begin function _Z25__device_stub__matrixClipPdddS_ii
.p2align 4, 0x90
.type _Z25__device_stub__matrixClipPdddS_ii,@function
_Z25__device_stub__matrixClipPdddS_ii: # @_Z25__device_stub__matrixClipPdddS_ii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movsd %xmm0, 80(%rsp)
movsd %xmm1, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z10matrixClipPdddS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z25__device_stub__matrixClipPdddS_ii, .Lfunc_end0-_Z25__device_stub__matrixClipPdddS_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10matrixClipPdddS_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10matrixClipPdddS_ii,@object # @_Z10matrixClipPdddS_ii
.section .rodata,"a",@progbits
.globl _Z10matrixClipPdddS_ii
.p2align 3, 0x0
_Z10matrixClipPdddS_ii:
.quad _Z25__device_stub__matrixClipPdddS_ii
.size _Z10matrixClipPdddS_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z10matrixClipPdddS_ii"
.size .L__unnamed_1, 23
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__matrixClipPdddS_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10matrixClipPdddS_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z10matrixClipPdddS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e280000002100 */
/*0030*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */
/* 0x000e680000002600 */
/*0040*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e620000002200 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x184], PT ; /* 0x0000610000007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R3, R2, c[0x0][0x4], R5 ; /* 0x0000010002037a24 */
/* 0x002fca00078e0205 */
/*0080*/ ISETP.GE.OR P0, PT, R3, c[0x0][0x180], P0 ; /* 0x0000600003007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ HFMA2.MMA R5, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff057435 */
/* 0x000fe200000001ff */
/*00b0*/ IMAD R0, R3, c[0x0][0x184], R0 ; /* 0x0000610003007a24 */
/* 0x000fe200078e0200 */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd00000000a00 */
/*00d0*/ IMAD.WIDE R2, R0, R5, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fcc00078e0205 */
/*00e0*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1b00 */
/*00f0*/ IMAD.WIDE R4, R0, R5, c[0x0][0x178] ; /* 0x00005e0000047625 */
/* 0x000fe200078e0205 */
/*0100*/ DSETP.GT.AND P0, PT, R2, c[0x0][0x170], PT ; /* 0x00005c000200762a */
/* 0x004e1c0003f04000 */
/*0110*/ @P0 BRA 0x190 ; /* 0x0000007000000947 */
/* 0x001fea0003800000 */
/*0120*/ DSETP.GEU.AND P0, PT, R2, c[0x0][0x168], PT ; /* 0x00005a000200762a */
/* 0x000e1c0003f0e000 */
/*0130*/ @P0 STG.E.64 [R4.64], R2 ; /* 0x0000000204000986 */
/* 0x0011e2000c101b04 */
/*0140*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0150*/ MOV R2, c[0x0][0x168] ; /* 0x00005a0000027a02 */
/* 0x001fe40000000f00 */
/*0160*/ MOV R3, c[0x0][0x16c] ; /* 0x00005b0000037a02 */
/* 0x000fca0000000f00 */
/*0170*/ STG.E.64 [R4.64], R2 ; /* 0x0000000204007986 */
/* 0x000fe2000c101b04 */
/*0180*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0190*/ MOV R2, c[0x0][0x170] ; /* 0x00005c0000027a02 */
/* 0x000fe40000000f00 */
/*01a0*/ MOV R3, c[0x0][0x174] ; /* 0x00005d0000037a02 */
/* 0x000fca0000000f00 */
/*01b0*/ STG.E.64 [R4.64], R2 ; /* 0x0000000204007986 */
/* 0x000fe2000c101b04 */
/*01c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01d0*/ BRA 0x1d0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10matrixClipPdddS_ii
.globl _Z10matrixClipPdddS_ii
.p2align 8
.type _Z10matrixClipPdddS_ii,@function
_Z10matrixClipPdddS_ii:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x34
s_load_b64 s[2:3], s[0:1], 0x20
v_and_b32_e32 v1, 0x3ff, v0
v_bfe_u32 v0, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s5, s4, 0xffff
s_lshr_b32 s4, s4, 16
v_mad_u64_u32 v[2:3], null, s14, s5, v[1:2]
v_mad_u64_u32 v[3:4], null, s15, s4, v[0:1]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_gt_i32_e32 vcc_lo, s3, v2
v_cmp_gt_i32_e64 s2, s2, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, vcc_lo, s2
s_and_saveexec_b32 s4, s2
s_cbranch_execz .LBB0_9
s_clause 0x1
s_load_b64 s[8:9], s[0:1], 0x0
s_load_b128 s[4:7], s[0:1], 0x10
v_mad_u64_u32 v[0:1], null, v3, s3, v[2:3]
s_mov_b32 s2, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 31, v0
v_lshlrev_b64 v[4:5], 3, v[0:1]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s8, v4
v_add_co_ci_u32_e32 v3, vcc_lo, s9, v5, vcc_lo
global_load_b64 v[2:3], v[2:3], off
s_waitcnt vmcnt(0)
v_cmpx_nlt_f64_e32 s[4:5], v[2:3]
s_xor_b32 s8, exec_lo, s2
s_cbranch_execz .LBB0_7
s_load_b64 s[2:3], s[0:1], 0x8
v_add_co_u32 v0, s0, s6, v4
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v1, s0, s7, v5, s0
s_mov_b32 s0, exec_lo
s_waitcnt lgkmcnt(0)
v_cmpx_ngt_f64_e32 s[2:3], v[2:3]
s_xor_b32 s0, exec_lo, s0
s_cbranch_execz .LBB0_4
global_store_b64 v[0:1], v[2:3], off
.LBB0_4:
s_and_not1_saveexec_b32 s0, s0
s_cbranch_execz .LBB0_6
v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
global_store_b64 v[0:1], v[2:3], off
.LBB0_6:
s_or_b32 exec_lo, exec_lo, s0
.LBB0_7:
s_and_not1_saveexec_b32 s0, s8
s_cbranch_execz .LBB0_9
v_lshlrev_b64 v[0:1], 3, v[0:1]
v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v0, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
global_store_b64 v[0:1], v[2:3], off
.LBB0_9:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10matrixClipPdddS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10matrixClipPdddS_ii, .Lfunc_end0-_Z10matrixClipPdddS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 8
.value_kind: by_value
- .offset: 16
.size: 8
.value_kind: by_value
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10matrixClipPdddS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10matrixClipPdddS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00029bda_00000000-6_matrixClip.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z36__device_stub__Z10matrixClipPdddS_iiPdddS_ii
.type _Z36__device_stub__Z10matrixClipPdddS_iiPdddS_ii, @function
_Z36__device_stub__Z10matrixClipPdddS_iiPdddS_ii:
.LFB2051:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movsd %xmm0, 32(%rsp)
movsd %xmm1, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z10matrixClipPdddS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z36__device_stub__Z10matrixClipPdddS_iiPdddS_ii, .-_Z36__device_stub__Z10matrixClipPdddS_iiPdddS_ii
.globl _Z10matrixClipPdddS_ii
.type _Z10matrixClipPdddS_ii, @function
_Z10matrixClipPdddS_ii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z36__device_stub__Z10matrixClipPdddS_iiPdddS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z10matrixClipPdddS_ii, .-_Z10matrixClipPdddS_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z10matrixClipPdddS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z10matrixClipPdddS_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "matrixClip.hip"
.globl _Z25__device_stub__matrixClipPdddS_ii # -- Begin function _Z25__device_stub__matrixClipPdddS_ii
.p2align 4, 0x90
.type _Z25__device_stub__matrixClipPdddS_ii,@function
_Z25__device_stub__matrixClipPdddS_ii: # @_Z25__device_stub__matrixClipPdddS_ii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movsd %xmm0, 80(%rsp)
movsd %xmm1, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z10matrixClipPdddS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z25__device_stub__matrixClipPdddS_ii, .Lfunc_end0-_Z25__device_stub__matrixClipPdddS_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10matrixClipPdddS_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10matrixClipPdddS_ii,@object # @_Z10matrixClipPdddS_ii
.section .rodata,"a",@progbits
.globl _Z10matrixClipPdddS_ii
.p2align 3, 0x0
_Z10matrixClipPdddS_ii:
.quad _Z25__device_stub__matrixClipPdddS_ii
.size _Z10matrixClipPdddS_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z10matrixClipPdddS_ii"
.size .L__unnamed_1, 23
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__matrixClipPdddS_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10matrixClipPdddS_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "stdio.h"
#include <cuda.h>
//#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
//# error "printf not defined for CUDA_ARCH" ##__CUDA_ARCH__
//#endif
__global__ void helloCuda( float f )
{
printf( "Hello thread %d, f = %f\n", threadIdx.x, f );
}
#ifdef __CUDACC__
#warning cuda cc
#endif
int main( int argc, char** argv )
{
helloCuda<<< 1, 5 >>>( 3.14159 );
cudaDeviceSynchronize();
return 0;
} | code for sm_80
Function : _Z9helloCudaf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fc600078e00ff */
/*0010*/ S2R R10, SR_TID.X ; /* 0x00000000000a7919 */
/* 0x000e220000002100 */
/*0020*/ F2F.F64.F32 R2, c[0x0][0x160] ; /* 0x0000580000027b10 */
/* 0x000e620000201800 */
/*0030*/ IADD3 R1, R1, -0x10, RZ ; /* 0xfffffff001017810 */
/* 0x000fe20007ffe0ff */
/*0040*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */
/* 0x000fe200078e00ff */
/*0050*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe20000000f00 */
/*0060*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */
/* 0x000fe200078e00ff */
/*0070*/ IADD3 R6, P0, R1, c[0x0][0x20], RZ ; /* 0x0000080001067a10 */
/* 0x000fe40007f1e0ff */
/*0080*/ LDC.64 R8, c[0x4][R0] ; /* 0x0100000000087b82 */
/* 0x0004e60000000a00 */
/*0090*/ IMAD.X R7, RZ, RZ, c[0x0][0x24], P0 ; /* 0x00000900ff077624 */
/* 0x000fe200000e06ff */
/*00a0*/ STL.64 [R1+0x8], R2 ; /* 0x0000080201007387 */
/* 0x0025e80000100a00 */
/*00b0*/ STL [R1], R10 ; /* 0x0000000a01007387 */
/* 0x0015e60000100800 */
/*00c0*/ LEPC R2 ; /* 0x000000000002734e */
/* 0x004fe40000000000 */
/*00d0*/ MOV R11, 0x140 ; /* 0x00000140000b7802 */
/* 0x000fe40000000f00 */
/*00e0*/ MOV R20, 0xc0 ; /* 0x000000c000147802 */
/* 0x000fc40000000f00 */
/*00f0*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*0100*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe40000000f00 */
/*0110*/ IADD3 R20, P0, P1, -R20, R11, R2 ; /* 0x0000000b14147210 */
/* 0x000fc8000791e102 */
/*0120*/ IADD3.X R21, ~R0, R21, R3, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e2503 */
/*0130*/ CALL.ABS.NOINC R8 ; /* 0x0000000008007343 */
/* 0x008fea0003c00000 */
/*0140*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0150*/ BRA 0x150; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "stdio.h"
#include <cuda.h>
//#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
//# error "printf not defined for CUDA_ARCH" ##__CUDA_ARCH__
//#endif
__global__ void helloCuda( float f )
{
printf( "Hello thread %d, f = %f\n", threadIdx.x, f );
}
#ifdef __CUDACC__
#warning cuda cc
#endif
int main( int argc, char** argv )
{
helloCuda<<< 1, 5 >>>( 3.14159 );
cudaDeviceSynchronize();
return 0;
} | .file "tmpxft_00109008_00000000-6_printf_test.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z27__device_stub__Z9helloCudaff
.type _Z27__device_stub__Z9helloCudaff, @function
_Z27__device_stub__Z9helloCudaff:
.LFB2082:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movss %xmm0, 12(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z9helloCudaf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z27__device_stub__Z9helloCudaff, .-_Z27__device_stub__Z9helloCudaff
.globl _Z9helloCudaf
.type _Z9helloCudaf, @function
_Z9helloCudaf:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z9helloCudaff
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z9helloCudaf, .-_Z9helloCudaf
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movl $5, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L12:
call cudaDeviceSynchronize@PLT
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
movss .LC0(%rip), %xmm0
call _Z27__device_stub__Z9helloCudaff
jmp .L12
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "_Z9helloCudaf"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z9helloCudaf(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1078530000
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "stdio.h"
#include <cuda.h>
//#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
//# error "printf not defined for CUDA_ARCH" ##__CUDA_ARCH__
//#endif
__global__ void helloCuda( float f )
{
printf( "Hello thread %d, f = %f\n", threadIdx.x, f );
}
#ifdef __CUDACC__
#warning cuda cc
#endif
int main( int argc, char** argv )
{
helloCuda<<< 1, 5 >>>( 3.14159 );
cudaDeviceSynchronize();
return 0;
} | #include "stdio.h"
#include <hip/hip_runtime.h>
//#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
//# error "printf not defined for CUDA_ARCH" ##__CUDA_ARCH__
//#endif
__global__ void helloCuda( float f )
{
printf( "Hello thread %d, f = %f\n", threadIdx.x, f );
}
#ifdef __HIPCC__
#warning cuda cc
#endif
int main( int argc, char** argv )
{
helloCuda<<< 1, 5 >>>( 3.14159 );
hipDeviceSynchronize();
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include "stdio.h"
#include <hip/hip_runtime.h>
//#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
//# error "printf not defined for CUDA_ARCH" ##__CUDA_ARCH__
//#endif
__global__ void helloCuda( float f )
{
printf( "Hello thread %d, f = %f\n", threadIdx.x, f );
}
#ifdef __HIPCC__
#warning cuda cc
#endif
int main( int argc, char** argv )
{
helloCuda<<< 1, 5 >>>( 3.14159 );
hipDeviceSynchronize();
return 0;
} | .text
.file "printf_test.hip"
.globl _Z24__device_stub__helloCudaf # -- Begin function _Z24__device_stub__helloCudaf
.p2align 4, 0x90
.type _Z24__device_stub__helloCudaf,@function
_Z24__device_stub__helloCudaf: # @_Z24__device_stub__helloCudaf
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movss %xmm0, 12(%rsp)
leaq 12(%rsp), %rax
movq %rax, 16(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z9helloCudaf, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end0:
.size _Z24__device_stub__helloCudaf, .Lfunc_end0-_Z24__device_stub__helloCudaf
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 4(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movl $1078530000, 12(%rsp) # imm = 0x40490FD0
leaq 12(%rsp), %rax
movq %rax, 16(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z9helloCudaf, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
callq hipDeviceSynchronize
xorl %eax, %eax
addq $72, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9helloCudaf, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9helloCudaf,@object # @_Z9helloCudaf
.section .rodata,"a",@progbits
.globl _Z9helloCudaf
.p2align 3, 0x0
_Z9helloCudaf:
.quad _Z24__device_stub__helloCudaf
.size _Z9helloCudaf, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9helloCudaf"
.size .L__unnamed_1, 14
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__helloCudaf
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9helloCudaf
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00109008_00000000-6_printf_test.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z27__device_stub__Z9helloCudaff
.type _Z27__device_stub__Z9helloCudaff, @function
_Z27__device_stub__Z9helloCudaff:
.LFB2082:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movss %xmm0, 12(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z9helloCudaf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z27__device_stub__Z9helloCudaff, .-_Z27__device_stub__Z9helloCudaff
.globl _Z9helloCudaf
.type _Z9helloCudaf, @function
_Z9helloCudaf:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z9helloCudaff
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z9helloCudaf, .-_Z9helloCudaf
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movl $5, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L12:
call cudaDeviceSynchronize@PLT
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
movss .LC0(%rip), %xmm0
call _Z27__device_stub__Z9helloCudaff
jmp .L12
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "_Z9helloCudaf"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z9helloCudaf(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1078530000
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "printf_test.hip"
.globl _Z24__device_stub__helloCudaf # -- Begin function _Z24__device_stub__helloCudaf
.p2align 4, 0x90
.type _Z24__device_stub__helloCudaf,@function
_Z24__device_stub__helloCudaf: # @_Z24__device_stub__helloCudaf
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movss %xmm0, 12(%rsp)
leaq 12(%rsp), %rax
movq %rax, 16(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z9helloCudaf, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end0:
.size _Z24__device_stub__helloCudaf, .Lfunc_end0-_Z24__device_stub__helloCudaf
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 4(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movl $1078530000, 12(%rsp) # imm = 0x40490FD0
leaq 12(%rsp), %rax
movq %rax, 16(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z9helloCudaf, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
callq hipDeviceSynchronize
xorl %eax, %eax
addq $72, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9helloCudaf, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9helloCudaf,@object # @_Z9helloCudaf
.section .rodata,"a",@progbits
.globl _Z9helloCudaf
.p2align 3, 0x0
_Z9helloCudaf:
.quad _Z24__device_stub__helloCudaf
.size _Z9helloCudaf, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9helloCudaf"
.size .L__unnamed_1, 14
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__helloCudaf
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9helloCudaf
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#include <thrust/sort.h>
#include <thrust/merge.h>
#define NUM_SETS 100000
#define DSIZE 100
typedef int mytype;
// for ascending sorted data
#define cmp(A,B) ((A)<(B))
#define nTPB 512
#define nBLK 128
#include <time.h>
#include <sys/time.h>
#define USECPSEC 1000000ULL
long long dtime_usec(unsigned long long start){
timeval tv;
gettimeofday(&tv, 0);
return ((tv.tv_sec*USECPSEC)+tv.tv_usec)-start;
}
template <typename T>
__host__ __device__ void smerge(const T * __restrict__ a, const T * __restrict__ b, T * __restrict__ c, const unsigned len_a, const unsigned len_b, const unsigned stride_a = 1, const unsigned stride_b = 1, const unsigned stride_c = 1){
unsigned len_c = len_a+len_b;
unsigned nc = 0;
unsigned na = 0;
unsigned nb = 0;
unsigned fa = (len_b == 0);
unsigned fb = (len_a == 0);
T nxta = a[0];
T nxtb = b[0];
while (nc < len_c){
if (fa) {c[stride_c*nc++] = nxta; na++; nxta = a[stride_a*na];}
else if (fb) {c[stride_c*nc++] = nxtb; nb++; nxtb = b[stride_b*nb];}
else if (cmp(nxta,nxtb)){
c[stride_c*nc++] = nxta;
na++;
if (na == len_a) fb++;
else nxta = a[stride_a*na];}
else {
c[stride_c*nc++] = nxtb;
nb++;
if (nb == len_b) fa++;
else nxtb = b[stride_b*nb];}}
}
template <typename T>
__global__ void rmtest(const T * __restrict__ a, const T * __restrict__ b, T * __restrict__ c, int num_arr, int len){
int idx=threadIdx.x+blockDim.x*blockIdx.x;
while (idx < num_arr){
int sel=idx*len;
smerge(a+sel, b+sel, c+(2*sel), len, len);
idx += blockDim.x*gridDim.x;}
}
template <typename T>
__global__ void cmtest(const T * __restrict__ a, const T * __restrict__ b, T * __restrict__ c, int num_arr, int len, int stride_a, int stride_b, int stride_c){
int idx=threadIdx.x+blockDim.x*blockIdx.x;
while (idx < num_arr){
smerge(a+idx, b+idx, c+idx, len, len, stride_a, stride_b, stride_c);
idx += blockDim.x*gridDim.x;}
}
template <typename T>
int rmvalidate(T *a, T *b, T *c, int num_arr, int len){
T *vc = (T *)malloc(2*len*sizeof(T));
for (int i = 0; i < num_arr; i++){
thrust::merge(a+(i*len), a+((i+1)*len), b+(i*len), b+((i+1)*len), vc);
#ifndef TIMING
for (int j = 0; j < len*2; j++)
if (vc[j] != c[(i*2*len)+j]) {printf("rm mismatch i: %d, j: %d, was: %d, should be: %d\n", i, j, c[(i*2*len)+j], vc[j]); return 0;}
#endif
}
return 1;
}
template <typename T>
int cmvalidate(const T *c1, const T *c2, int num_arr, int len){
for (int i = 0; i < num_arr; i++)
for (int j = 0; j < 2*len; j++)
if (c1[i*(2*len)+j] != c2[j*(num_arr)+i]) {printf("cm mismatch i: %d, j: %d, was: %d, should be: %d\n", i, j, c2[j*(num_arr)+i], c1[i*(2*len)+j]); return 0;}
return 1;
}
int main(){
mytype *h_a, *h_b, *h_c, *d_a, *d_b, *d_c;
h_a = (mytype *)malloc(DSIZE*NUM_SETS*sizeof(mytype));
h_b = (mytype *)malloc(DSIZE*NUM_SETS*sizeof(mytype));
h_c = (mytype *)malloc(DSIZE*NUM_SETS*sizeof(mytype)*2);
cudaMalloc(&d_a, (DSIZE*NUM_SETS+1)*sizeof(mytype));
cudaMalloc(&d_b, (DSIZE*NUM_SETS+1)*sizeof(mytype));
cudaMalloc(&d_c, DSIZE*NUM_SETS*sizeof(mytype)*2);
// test "row-major" storage
for (int i =0; i<DSIZE*NUM_SETS; i++){
h_a[i] = rand();
h_b[i] = rand();}
thrust::sort(h_a, h_a+DSIZE*NUM_SETS);
thrust::sort(h_b, h_b+DSIZE*NUM_SETS);
cudaMemcpy(d_a, h_a, DSIZE*NUM_SETS*sizeof(mytype), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, DSIZE*NUM_SETS*sizeof(mytype), cudaMemcpyHostToDevice);
unsigned long gtime = dtime_usec(0);
rmtest<<<nBLK, nTPB>>>(d_a, d_b, d_c, NUM_SETS, DSIZE);
cudaDeviceSynchronize();
gtime = dtime_usec(gtime);
cudaMemcpy(h_c, d_c, DSIZE*NUM_SETS*2*sizeof(mytype), cudaMemcpyDeviceToHost);
unsigned long ctime = dtime_usec(0);
if (!rmvalidate(h_a, h_b, h_c, NUM_SETS, DSIZE)) {printf("fail!\n"); return 1;}
ctime = dtime_usec(ctime);
printf("CPU time: %f, GPU RM time: %f\n", ctime/(float)USECPSEC, gtime/(float)USECPSEC);
// test "col-major" storage
mytype *ch_a, *ch_b, *ch_c;
ch_a = (mytype *)malloc(DSIZE*NUM_SETS*sizeof(mytype));
ch_b = (mytype *)malloc(DSIZE*NUM_SETS*sizeof(mytype));
ch_c = (mytype *)malloc(DSIZE*NUM_SETS*sizeof(mytype));
for (int i = 0; i < NUM_SETS; i++)
for (int j = 0; j < DSIZE; j++){
ch_a[j*NUM_SETS+i] = h_a[i*DSIZE+j];
ch_b[j*NUM_SETS+i] = h_b[i*DSIZE+j];}
cudaMemcpy(d_a, ch_a, DSIZE*NUM_SETS*sizeof(mytype), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, ch_b, DSIZE*NUM_SETS*sizeof(mytype), cudaMemcpyHostToDevice);
gtime = dtime_usec(0);
cmtest<<<nBLK, nTPB>>>(d_a, d_b, d_c, NUM_SETS, DSIZE, NUM_SETS, NUM_SETS, NUM_SETS );
cudaDeviceSynchronize();
gtime = dtime_usec(gtime);
cudaMemcpy(ch_c, d_c, DSIZE*NUM_SETS*2*sizeof(mytype), cudaMemcpyDeviceToHost);
if (!cmvalidate(h_c, ch_c, NUM_SETS, DSIZE)) {printf("fail!\n"); return 1;}
printf("GPU CM time: %f\n", gtime/(float)USECPSEC);
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <thrust/sort.h>
#include <thrust/merge.h>
#define NUM_SETS 100000
#define DSIZE 100
typedef int mytype;
// for ascending sorted data
#define cmp(A,B) ((A)<(B))
#define nTPB 512
#define nBLK 128
#include <time.h>
#include <sys/time.h>
#define USECPSEC 1000000ULL
long long dtime_usec(unsigned long long start){
timeval tv;
gettimeofday(&tv, 0);
return ((tv.tv_sec*USECPSEC)+tv.tv_usec)-start;
}
template <typename T>
__host__ __device__ void smerge(const T * __restrict__ a, const T * __restrict__ b, T * __restrict__ c, const unsigned len_a, const unsigned len_b, const unsigned stride_a = 1, const unsigned stride_b = 1, const unsigned stride_c = 1){
unsigned len_c = len_a+len_b;
unsigned nc = 0;
unsigned na = 0;
unsigned nb = 0;
unsigned fa = (len_b == 0);
unsigned fb = (len_a == 0);
T nxta = a[0];
T nxtb = b[0];
while (nc < len_c){
if (fa) {c[stride_c*nc++] = nxta; na++; nxta = a[stride_a*na];}
else if (fb) {c[stride_c*nc++] = nxtb; nb++; nxtb = b[stride_b*nb];}
else if (cmp(nxta,nxtb)){
c[stride_c*nc++] = nxta;
na++;
if (na == len_a) fb++;
else nxta = a[stride_a*na];}
else {
c[stride_c*nc++] = nxtb;
nb++;
if (nb == len_b) fa++;
else nxtb = b[stride_b*nb];}}
}
template <typename T>
__global__ void rmtest(const T * __restrict__ a, const T * __restrict__ b, T * __restrict__ c, int num_arr, int len){
int idx=threadIdx.x+blockDim.x*blockIdx.x;
while (idx < num_arr){
int sel=idx*len;
smerge(a+sel, b+sel, c+(2*sel), len, len);
idx += blockDim.x*gridDim.x;}
}
template <typename T>
__global__ void cmtest(const T * __restrict__ a, const T * __restrict__ b, T * __restrict__ c, int num_arr, int len, int stride_a, int stride_b, int stride_c){
int idx=threadIdx.x+blockDim.x*blockIdx.x;
while (idx < num_arr){
smerge(a+idx, b+idx, c+idx, len, len, stride_a, stride_b, stride_c);
idx += blockDim.x*gridDim.x;}
}
template <typename T>
int rmvalidate(T *a, T *b, T *c, int num_arr, int len){
T *vc = (T *)malloc(2*len*sizeof(T));
for (int i = 0; i < num_arr; i++){
thrust::merge(a+(i*len), a+((i+1)*len), b+(i*len), b+((i+1)*len), vc);
#ifndef TIMING
for (int j = 0; j < len*2; j++)
if (vc[j] != c[(i*2*len)+j]) {printf("rm mismatch i: %d, j: %d, was: %d, should be: %d\n", i, j, c[(i*2*len)+j], vc[j]); return 0;}
#endif
}
return 1;
}
template <typename T>
int cmvalidate(const T *c1, const T *c2, int num_arr, int len){
for (int i = 0; i < num_arr; i++)
for (int j = 0; j < 2*len; j++)
if (c1[i*(2*len)+j] != c2[j*(num_arr)+i]) {printf("cm mismatch i: %d, j: %d, was: %d, should be: %d\n", i, j, c2[j*(num_arr)+i], c1[i*(2*len)+j]); return 0;}
return 1;
}
int main(){
mytype *h_a, *h_b, *h_c, *d_a, *d_b, *d_c;
h_a = (mytype *)malloc(DSIZE*NUM_SETS*sizeof(mytype));
h_b = (mytype *)malloc(DSIZE*NUM_SETS*sizeof(mytype));
h_c = (mytype *)malloc(DSIZE*NUM_SETS*sizeof(mytype)*2);
hipMalloc(&d_a, (DSIZE*NUM_SETS+1)*sizeof(mytype));
hipMalloc(&d_b, (DSIZE*NUM_SETS+1)*sizeof(mytype));
hipMalloc(&d_c, DSIZE*NUM_SETS*sizeof(mytype)*2);
// test "row-major" storage
for (int i =0; i<DSIZE*NUM_SETS; i++){
h_a[i] = rand();
h_b[i] = rand();}
thrust::sort(h_a, h_a+DSIZE*NUM_SETS);
thrust::sort(h_b, h_b+DSIZE*NUM_SETS);
hipMemcpy(d_a, h_a, DSIZE*NUM_SETS*sizeof(mytype), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, DSIZE*NUM_SETS*sizeof(mytype), hipMemcpyHostToDevice);
unsigned long gtime = dtime_usec(0);
rmtest<<<nBLK, nTPB>>>(d_a, d_b, d_c, NUM_SETS, DSIZE);
hipDeviceSynchronize();
gtime = dtime_usec(gtime);
hipMemcpy(h_c, d_c, DSIZE*NUM_SETS*2*sizeof(mytype), hipMemcpyDeviceToHost);
unsigned long ctime = dtime_usec(0);
if (!rmvalidate(h_a, h_b, h_c, NUM_SETS, DSIZE)) {printf("fail!\n"); return 1;}
ctime = dtime_usec(ctime);
printf("CPU time: %f, GPU RM time: %f\n", ctime/(float)USECPSEC, gtime/(float)USECPSEC);
// test "col-major" storage
mytype *ch_a, *ch_b, *ch_c;
ch_a = (mytype *)malloc(DSIZE*NUM_SETS*sizeof(mytype));
ch_b = (mytype *)malloc(DSIZE*NUM_SETS*sizeof(mytype));
ch_c = (mytype *)malloc(DSIZE*NUM_SETS*sizeof(mytype));
for (int i = 0; i < NUM_SETS; i++)
for (int j = 0; j < DSIZE; j++){
ch_a[j*NUM_SETS+i] = h_a[i*DSIZE+j];
ch_b[j*NUM_SETS+i] = h_b[i*DSIZE+j];}
hipMemcpy(d_a, ch_a, DSIZE*NUM_SETS*sizeof(mytype), hipMemcpyHostToDevice);
hipMemcpy(d_b, ch_b, DSIZE*NUM_SETS*sizeof(mytype), hipMemcpyHostToDevice);
gtime = dtime_usec(0);
cmtest<<<nBLK, nTPB>>>(d_a, d_b, d_c, NUM_SETS, DSIZE, NUM_SETS, NUM_SETS, NUM_SETS );
hipDeviceSynchronize();
gtime = dtime_usec(gtime);
hipMemcpy(ch_c, d_c, DSIZE*NUM_SETS*2*sizeof(mytype), hipMemcpyDeviceToHost);
if (!cmvalidate(h_c, ch_c, NUM_SETS, DSIZE)) {printf("fail!\n"); return 1;}
printf("GPU CM time: %f\n", gtime/(float)USECPSEC);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <thrust/sort.h>
#include <thrust/merge.h>
#define NUM_SETS 100000
#define DSIZE 100
typedef int mytype;
// for ascending sorted data
#define cmp(A,B) ((A)<(B))
#define nTPB 512
#define nBLK 128
#include <time.h>
#include <sys/time.h>
#define USECPSEC 1000000ULL
long long dtime_usec(unsigned long long start){
timeval tv;
gettimeofday(&tv, 0);
return ((tv.tv_sec*USECPSEC)+tv.tv_usec)-start;
}
template <typename T>
__host__ __device__ void smerge(const T * __restrict__ a, const T * __restrict__ b, T * __restrict__ c, const unsigned len_a, const unsigned len_b, const unsigned stride_a = 1, const unsigned stride_b = 1, const unsigned stride_c = 1){
unsigned len_c = len_a+len_b;
unsigned nc = 0;
unsigned na = 0;
unsigned nb = 0;
unsigned fa = (len_b == 0);
unsigned fb = (len_a == 0);
T nxta = a[0];
T nxtb = b[0];
while (nc < len_c){
if (fa) {c[stride_c*nc++] = nxta; na++; nxta = a[stride_a*na];}
else if (fb) {c[stride_c*nc++] = nxtb; nb++; nxtb = b[stride_b*nb];}
else if (cmp(nxta,nxtb)){
c[stride_c*nc++] = nxta;
na++;
if (na == len_a) fb++;
else nxta = a[stride_a*na];}
else {
c[stride_c*nc++] = nxtb;
nb++;
if (nb == len_b) fa++;
else nxtb = b[stride_b*nb];}}
}
template <typename T>
__global__ void rmtest(const T * __restrict__ a, const T * __restrict__ b, T * __restrict__ c, int num_arr, int len){
int idx=threadIdx.x+blockDim.x*blockIdx.x;
while (idx < num_arr){
int sel=idx*len;
smerge(a+sel, b+sel, c+(2*sel), len, len);
idx += blockDim.x*gridDim.x;}
}
template <typename T>
__global__ void cmtest(const T * __restrict__ a, const T * __restrict__ b, T * __restrict__ c, int num_arr, int len, int stride_a, int stride_b, int stride_c){
int idx=threadIdx.x+blockDim.x*blockIdx.x;
while (idx < num_arr){
smerge(a+idx, b+idx, c+idx, len, len, stride_a, stride_b, stride_c);
idx += blockDim.x*gridDim.x;}
}
template <typename T>
int rmvalidate(T *a, T *b, T *c, int num_arr, int len){
T *vc = (T *)malloc(2*len*sizeof(T));
for (int i = 0; i < num_arr; i++){
thrust::merge(a+(i*len), a+((i+1)*len), b+(i*len), b+((i+1)*len), vc);
#ifndef TIMING
for (int j = 0; j < len*2; j++)
if (vc[j] != c[(i*2*len)+j]) {printf("rm mismatch i: %d, j: %d, was: %d, should be: %d\n", i, j, c[(i*2*len)+j], vc[j]); return 0;}
#endif
}
return 1;
}
template <typename T>
int cmvalidate(const T *c1, const T *c2, int num_arr, int len){
for (int i = 0; i < num_arr; i++)
for (int j = 0; j < 2*len; j++)
if (c1[i*(2*len)+j] != c2[j*(num_arr)+i]) {printf("cm mismatch i: %d, j: %d, was: %d, should be: %d\n", i, j, c2[j*(num_arr)+i], c1[i*(2*len)+j]); return 0;}
return 1;
}
int main(){
mytype *h_a, *h_b, *h_c, *d_a, *d_b, *d_c;
h_a = (mytype *)malloc(DSIZE*NUM_SETS*sizeof(mytype));
h_b = (mytype *)malloc(DSIZE*NUM_SETS*sizeof(mytype));
h_c = (mytype *)malloc(DSIZE*NUM_SETS*sizeof(mytype)*2);
hipMalloc(&d_a, (DSIZE*NUM_SETS+1)*sizeof(mytype));
hipMalloc(&d_b, (DSIZE*NUM_SETS+1)*sizeof(mytype));
hipMalloc(&d_c, DSIZE*NUM_SETS*sizeof(mytype)*2);
// test "row-major" storage
for (int i =0; i<DSIZE*NUM_SETS; i++){
h_a[i] = rand();
h_b[i] = rand();}
thrust::sort(h_a, h_a+DSIZE*NUM_SETS);
thrust::sort(h_b, h_b+DSIZE*NUM_SETS);
hipMemcpy(d_a, h_a, DSIZE*NUM_SETS*sizeof(mytype), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, DSIZE*NUM_SETS*sizeof(mytype), hipMemcpyHostToDevice);
unsigned long gtime = dtime_usec(0);
rmtest<<<nBLK, nTPB>>>(d_a, d_b, d_c, NUM_SETS, DSIZE);
hipDeviceSynchronize();
gtime = dtime_usec(gtime);
hipMemcpy(h_c, d_c, DSIZE*NUM_SETS*2*sizeof(mytype), hipMemcpyDeviceToHost);
unsigned long ctime = dtime_usec(0);
if (!rmvalidate(h_a, h_b, h_c, NUM_SETS, DSIZE)) {printf("fail!\n"); return 1;}
ctime = dtime_usec(ctime);
printf("CPU time: %f, GPU RM time: %f\n", ctime/(float)USECPSEC, gtime/(float)USECPSEC);
// test "col-major" storage
mytype *ch_a, *ch_b, *ch_c;
ch_a = (mytype *)malloc(DSIZE*NUM_SETS*sizeof(mytype));
ch_b = (mytype *)malloc(DSIZE*NUM_SETS*sizeof(mytype));
ch_c = (mytype *)malloc(DSIZE*NUM_SETS*sizeof(mytype));
for (int i = 0; i < NUM_SETS; i++)
for (int j = 0; j < DSIZE; j++){
ch_a[j*NUM_SETS+i] = h_a[i*DSIZE+j];
ch_b[j*NUM_SETS+i] = h_b[i*DSIZE+j];}
hipMemcpy(d_a, ch_a, DSIZE*NUM_SETS*sizeof(mytype), hipMemcpyHostToDevice);
hipMemcpy(d_b, ch_b, DSIZE*NUM_SETS*sizeof(mytype), hipMemcpyHostToDevice);
gtime = dtime_usec(0);
cmtest<<<nBLK, nTPB>>>(d_a, d_b, d_c, NUM_SETS, DSIZE, NUM_SETS, NUM_SETS, NUM_SETS );
hipDeviceSynchronize();
gtime = dtime_usec(gtime);
hipMemcpy(ch_c, d_c, DSIZE*NUM_SETS*2*sizeof(mytype), hipMemcpyDeviceToHost);
if (!cmvalidate(h_c, ch_c, NUM_SETS, DSIZE)) {printf("fail!\n"); return 1;}
printf("GPU CM time: %f\n", gtime/(float)USECPSEC);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.section .text._Z6rmtestIiEvPKT_S2_PS0_ii,"axG",@progbits,_Z6rmtestIiEvPKT_S2_PS0_ii,comdat
.protected _Z6rmtestIiEvPKT_S2_PS0_ii
.globl _Z6rmtestIiEvPKT_S2_PS0_ii
.p2align 8
.type _Z6rmtestIiEvPKT_S2_PS0_ii,@function
_Z6rmtestIiEvPKT_S2_PS0_ii:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x2c
s_load_b32 s8, s[0:1], 0x18
s_add_u32 s2, s0, 32
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s10, s4, 0xffff
s_mov_b32 s4, exec_lo
v_mad_u64_u32 v[1:2], null, s15, s10, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s8, v1
s_cbranch_execz .LBB0_22
s_load_b32 s9, s[0:1], 0x1c
s_load_b32 s3, s[2:3], 0x0
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
v_mov_b32_e32 v4, 0
s_waitcnt lgkmcnt(0)
s_lshl_b32 s2, s9, 1
v_mul_lo_u32 v2, s9, v1
s_cmp_lg_u32 s2, 0
s_mul_i32 s3, s3, s10
s_cselect_b32 s10, -1, 0
s_cmp_eq_u32 s9, 0
s_mul_i32 s12, s3, s9
s_cselect_b32 s11, -1, 0
s_lshl_b32 s12, s12, 1
v_cndmask_b32_e64 v0, 0, 1, s11
v_lshlrev_b32_e32 v2, 1, v2
s_mov_b32 s11, 0
s_branch .LBB0_3
.LBB0_2:
v_add_nc_u32_e32 v1, s3, v1
v_add_nc_u32_e32 v2, s12, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_le_i32_e32 vcc_lo, s8, v1
s_or_b32 s11, vcc_lo, s11
s_and_not1_b32 exec_lo, exec_lo, s11
s_cbranch_execz .LBB0_22
.LBB0_3:
s_and_not1_b32 vcc_lo, exec_lo, s10
s_cbranch_vccnz .LBB0_2
v_mul_lo_u32 v5, v1, s9
v_ashrrev_i32_e32 v3, 31, v2
v_dual_mov_b32 v9, 0 :: v_dual_mov_b32 v10, v0
v_mov_b32_e32 v15, v0
s_mov_b32 s13, s2
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_lshlrev_b64 v[11:12], 2, v[2:3]
v_mov_b32_e32 v3, v9
v_ashrrev_i32_e32 v6, 31, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[7:8], 2, v[5:6]
v_add_co_u32 v5, vcc_lo, s6, v7
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v6, vcc_lo, s7, v8, vcc_lo
v_add_co_u32 v7, vcc_lo, s4, v7
v_add_co_ci_u32_e32 v8, vcc_lo, s5, v8, vcc_lo
v_add_co_u32 v11, vcc_lo, s0, v11
global_load_b32 v13, v[5:6], off
global_load_b32 v14, v[7:8], off
v_add_co_ci_u32_e32 v12, vcc_lo, s1, v12, vcc_lo
s_branch .LBB0_9
.LBB0_5:
s_or_b32 exec_lo, exec_lo, s17
v_mov_b32_e32 v15, 0
.LBB0_6:
s_or_b32 exec_lo, exec_lo, s16
.LBB0_7:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s15
.LBB0_8:
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s14
v_add_co_u32 v11, vcc_lo, v11, 4
v_add_co_ci_u32_e32 v12, vcc_lo, 0, v12, vcc_lo
s_add_i32 s13, s13, -1
s_cmp_lg_u32 s13, 0
s_cbranch_scc0 .LBB0_2
.LBB0_9:
s_mov_b32 s14, exec_lo
v_cmpx_ne_u32_e32 0, v15
s_xor_b32 s14, exec_lo, s14
s_cbranch_execz .LBB0_11
v_add_nc_u32_e32 v3, 1, v3
s_waitcnt vmcnt(0)
global_store_b32 v[11:12], v14, off
s_mov_b32 s15, 1
v_lshlrev_b64 v[15:16], 2, v[3:4]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v15, vcc_lo, v7, v15
v_add_co_ci_u32_e32 v16, vcc_lo, v8, v16, vcc_lo
global_load_b32 v14, v[15:16], off
.LBB0_11:
s_or_saveexec_b32 s14, s14
v_mov_b32_e32 v15, s15
s_xor_b32 exec_lo, exec_lo, s14
s_cbranch_execz .LBB0_8
s_mov_b32 s15, exec_lo
v_cmpx_ne_u32_e32 0, v10
s_xor_b32 s15, exec_lo, s15
s_cbranch_execz .LBB0_14
v_dual_mov_b32 v10, v4 :: v_dual_add_nc_u32 v9, 1, v9
s_waitcnt vmcnt(0)
global_store_b32 v[11:12], v13, off
s_mov_b32 s16, 1
s_mov_b32 s17, 0
v_lshlrev_b64 v[15:16], 2, v[9:10]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v15, vcc_lo, v5, v15
v_add_co_ci_u32_e32 v16, vcc_lo, v6, v16, vcc_lo
global_load_b32 v13, v[15:16], off
.LBB0_14:
s_or_saveexec_b32 s15, s15
v_dual_mov_b32 v10, s16 :: v_dual_mov_b32 v15, s17
s_xor_b32 exec_lo, exec_lo, s15
s_cbranch_execz .LBB0_7
s_mov_b32 s16, exec_lo
s_waitcnt vmcnt(0)
v_cmpx_ge_i32_e64 v14, v13
s_xor_b32 s16, exec_lo, s16
s_cbranch_execz .LBB0_19
v_add_nc_u32_e32 v9, 1, v9
v_mov_b32_e32 v15, 1
s_mov_b32 s17, exec_lo
global_store_b32 v[11:12], v13, off
v_cmpx_ne_u32_e64 s9, v9
s_cbranch_execz .LBB0_18
v_mov_b32_e32 v10, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[15:16], 2, v[9:10]
v_add_co_u32 v15, vcc_lo, v5, v15
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v16, vcc_lo, v6, v16, vcc_lo
global_load_b32 v13, v[15:16], off
v_mov_b32_e32 v15, 0
.LBB0_18:
s_or_b32 exec_lo, exec_lo, s17
s_mov_b32 s17, 0
.LBB0_19:
s_or_saveexec_b32 s16, s16
v_mov_b32_e32 v10, s17
s_xor_b32 exec_lo, exec_lo, s16
s_cbranch_execz .LBB0_6
v_dual_mov_b32 v10, 1 :: v_dual_add_nc_u32 v3, 1, v3
s_mov_b32 s17, exec_lo
global_store_b32 v[11:12], v14, off
v_cmpx_ne_u32_e64 s9, v3
s_cbranch_execz .LBB0_5
v_lshlrev_b64 v[14:15], 2, v[3:4]
v_mov_b32_e32 v10, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v14, vcc_lo, v7, v14
v_add_co_ci_u32_e32 v15, vcc_lo, v8, v15, vcc_lo
global_load_b32 v14, v[14:15], off
s_branch .LBB0_5
.LBB0_22:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6rmtestIiEvPKT_S2_PS0_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 17
.amdhsa_next_free_sgpr 18
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.section .text._Z6rmtestIiEvPKT_S2_PS0_ii,"axG",@progbits,_Z6rmtestIiEvPKT_S2_PS0_ii,comdat
.Lfunc_end0:
.size _Z6rmtestIiEvPKT_S2_PS0_ii, .Lfunc_end0-_Z6rmtestIiEvPKT_S2_PS0_ii
.section .AMDGPU.csdata,"",@progbits
.section .text._Z6cmtestIiEvPKT_S2_PS0_iiiii,"axG",@progbits,_Z6cmtestIiEvPKT_S2_PS0_iiiii,comdat
.protected _Z6cmtestIiEvPKT_S2_PS0_iiiii
.globl _Z6cmtestIiEvPKT_S2_PS0_iiiii
.p2align 8
.type _Z6cmtestIiEvPKT_S2_PS0_iiiii,@function
_Z6cmtestIiEvPKT_S2_PS0_iiiii:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x3c
s_load_b32 s14, s[0:1], 0x18
s_add_u32 s2, s0, 48
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s12, s4, 0xffff
s_mov_b32 s4, exec_lo
v_mad_u64_u32 v[1:2], null, s15, s12, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s14, v1
s_cbranch_execz .LBB1_22
s_load_b128 s[4:7], s[0:1], 0x1c
s_load_b32 s16, s[2:3], 0x0
s_clause 0x1
s_load_b128 s[8:11], s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x10
v_mov_b32_e32 v3, 0
s_mov_b32 s13, 0
s_mov_b32 s17, 0
s_waitcnt lgkmcnt(0)
s_lshl_b32 s1, s4, 1
s_mul_i32 s16, s16, s12
s_cmp_lg_u32 s1, 0
s_cselect_b32 s15, -1, 0
s_cmp_eq_u32 s4, 0
s_cselect_b32 s0, -1, 0
s_delay_alu instid0(SALU_CYCLE_1)
v_cndmask_b32_e64 v0, 0, 1, s0
s_branch .LBB1_3
.LBB1_2:
v_add_nc_u32_e32 v1, s16, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_le_i32_e32 vcc_lo, s14, v1
s_or_b32 s17, vcc_lo, s17
s_and_not1_b32 exec_lo, exec_lo, s17
s_cbranch_execz .LBB1_22
.LBB1_3:
s_and_not1_b32 vcc_lo, exec_lo, s15
s_cbranch_vccnz .LBB1_2
v_ashrrev_i32_e32 v2, 31, v1
v_mov_b32_e32 v12, 0
v_mov_b32_e32 v16, v0
v_mov_b32_e32 v14, 0
s_mov_b32 s12, 0
v_lshlrev_b64 v[8:9], 2, v[1:2]
v_mov_b32_e32 v2, v0
s_mov_b32 s18, s1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v4, vcc_lo, s10, v8
v_add_co_ci_u32_e32 v5, vcc_lo, s11, v9, vcc_lo
v_add_co_u32 v6, vcc_lo, s8, v8
v_add_co_ci_u32_e32 v7, vcc_lo, s9, v9, vcc_lo
v_add_co_u32 v10, vcc_lo, s2, v8
global_load_b32 v13, v[4:5], off
global_load_b32 v15, v[6:7], off
v_add_co_ci_u32_e32 v11, vcc_lo, s3, v9, vcc_lo
s_branch .LBB1_9
.LBB1_5:
s_or_b32 exec_lo, exec_lo, s21
v_mov_b32_e32 v16, 0
.LBB1_6:
s_or_b32 exec_lo, exec_lo, s0
.LBB1_7:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s20
.LBB1_8:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s19
s_add_i32 s18, s18, -1
s_add_i32 s12, s12, s7
s_cmp_lg_u32 s18, 0
s_cbranch_scc0 .LBB1_2
.LBB1_9:
s_mov_b32 s0, exec_lo
v_cmpx_ne_u32_e32 0, v16
s_xor_b32 s0, exec_lo, s0
s_cbranch_execz .LBB1_11
v_dual_mov_b32 v9, 0 :: v_dual_add_nc_u32 v14, 1, v14
s_lshl_b64 s[20:21], s[12:13], 2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v16, vcc_lo, v10, s20
v_mul_lo_u32 v8, v14, s5
v_add_co_ci_u32_e32 v17, vcc_lo, s21, v11, vcc_lo
s_mov_b32 s20, 1
s_waitcnt vmcnt(0)
global_store_b32 v[16:17], v15, off
v_lshlrev_b64 v[8:9], 2, v[8:9]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v8, vcc_lo, v6, v8
v_add_co_ci_u32_e32 v9, vcc_lo, v7, v9, vcc_lo
global_load_b32 v15, v[8:9], off
.LBB1_11:
s_or_saveexec_b32 s19, s0
v_mov_b32_e32 v16, s20
s_xor_b32 exec_lo, exec_lo, s19
s_cbranch_execz .LBB1_8
s_mov_b32 s0, exec_lo
v_cmpx_ne_u32_e32 0, v2
s_xor_b32 s0, exec_lo, s0
s_cbranch_execz .LBB1_14
v_dual_mov_b32 v9, 0 :: v_dual_add_nc_u32 v12, 1, v12
s_lshl_b64 s[20:21], s[12:13], 2
s_mov_b32 s22, 0
v_add_co_u32 v16, vcc_lo, v10, s20
s_delay_alu instid0(VALU_DEP_2)
v_mul_lo_u32 v8, v12, s6
v_add_co_ci_u32_e32 v17, vcc_lo, s21, v11, vcc_lo
s_mov_b32 s21, 1
s_waitcnt vmcnt(0)
global_store_b32 v[16:17], v13, off
v_lshlrev_b64 v[8:9], 2, v[8:9]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v8, vcc_lo, v4, v8
v_add_co_ci_u32_e32 v9, vcc_lo, v5, v9, vcc_lo
global_load_b32 v13, v[8:9], off
.LBB1_14:
s_or_saveexec_b32 s20, s0
v_mov_b32_e32 v2, s21
v_mov_b32_e32 v16, s22
s_xor_b32 exec_lo, exec_lo, s20
s_cbranch_execz .LBB1_7
s_lshl_b64 s[22:23], s[12:13], 2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v8, s0, v10, s22
v_add_co_ci_u32_e64 v9, s0, s23, v11, s0
s_mov_b32 s0, exec_lo
s_waitcnt vmcnt(0)
v_cmpx_ge_i32_e64 v15, v13
s_xor_b32 s0, exec_lo, s0
s_cbranch_execz .LBB1_19
v_add_nc_u32_e32 v12, 1, v12
v_mov_b32_e32 v16, 1
s_mov_b32 s21, exec_lo
global_store_b32 v[8:9], v13, off
v_cmpx_ne_u32_e64 s4, v12
s_cbranch_execz .LBB1_18
v_mul_lo_u32 v2, v12, s6
v_mov_b32_e32 v16, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[8:9], 2, v[2:3]
v_add_co_u32 v8, vcc_lo, v4, v8
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v9, vcc_lo, v5, v9, vcc_lo
global_load_b32 v13, v[8:9], off
.LBB1_18:
s_or_b32 exec_lo, exec_lo, s21
s_mov_b32 s21, 0
.LBB1_19:
s_or_saveexec_b32 s0, s0
v_mov_b32_e32 v2, s21
s_xor_b32 exec_lo, exec_lo, s0
s_cbranch_execz .LBB1_6
v_add_nc_u32_e32 v14, 1, v14
v_mov_b32_e32 v2, 1
s_mov_b32 s21, exec_lo
global_store_b32 v[8:9], v15, off
v_cmpx_ne_u32_e64 s4, v14
s_cbranch_execz .LBB1_5
v_mul_lo_u32 v2, v14, s5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[8:9], 2, v[2:3]
v_mov_b32_e32 v2, 0
v_add_co_u32 v8, vcc_lo, v6, v8
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v9, vcc_lo, v7, v9, vcc_lo
global_load_b32 v15, v[8:9], off
s_branch .LBB1_5
.LBB1_22:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6cmtestIiEvPKT_S2_PS0_iiiii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 304
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 18
.amdhsa_next_free_sgpr 24
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.section .text._Z6cmtestIiEvPKT_S2_PS0_iiiii,"axG",@progbits,_Z6cmtestIiEvPKT_S2_PS0_iiiii,comdat
.Lfunc_end1:
.size _Z6cmtestIiEvPKT_S2_PS0_iiiii, .Lfunc_end1-_Z6cmtestIiEvPKT_S2_PS0_iiiii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .actual_access: read_only
.address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .actual_access: read_only
.address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .actual_access: write_only
.address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6rmtestIiEvPKT_S2_PS0_ii
.private_segment_fixed_size: 0
.sgpr_count: 20
.sgpr_spill_count: 0
.symbol: _Z6rmtestIiEvPKT_S2_PS0_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 17
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .actual_access: read_only
.address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .actual_access: read_only
.address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .actual_access: write_only
.address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: by_value
- .offset: 48
.size: 4
.value_kind: hidden_block_count_x
- .offset: 52
.size: 4
.value_kind: hidden_block_count_y
- .offset: 56
.size: 4
.value_kind: hidden_block_count_z
- .offset: 60
.size: 2
.value_kind: hidden_group_size_x
- .offset: 62
.size: 2
.value_kind: hidden_group_size_y
- .offset: 64
.size: 2
.value_kind: hidden_group_size_z
- .offset: 66
.size: 2
.value_kind: hidden_remainder_x
- .offset: 68
.size: 2
.value_kind: hidden_remainder_y
- .offset: 70
.size: 2
.value_kind: hidden_remainder_z
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 112
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 304
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6cmtestIiEvPKT_S2_PS0_iiiii
.private_segment_fixed_size: 0
.sgpr_count: 26
.sgpr_spill_count: 0
.symbol: _Z6cmtestIiEvPKT_S2_PS0_iiiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 18
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | // Note: errors in this file will appear on the wrong line, since we copy another header file
// in to provide some utility functions (the include paths in Jitify are somewhat unreliable)
template<typename Destination, typename LHS, typename RHS>
// Element-wise addition: dst[i] = lhs[i] + rhs[i] for i in [0, elements).
// Expects a 1D launch with gridDim.x * blockDim.x >= elements; extra threads
// are masked off by the bounds check. All pointers must be device pointers.
__global__ void addArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
// Cast before multiplying: blockDim.x * blockIdx.x is evaluated in unsigned
// int and can wrap for very large grids before widening to size_t.
const size_t kernelIndex = static_cast<size_t>(blockDim.x) * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] + rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
// Element-wise addition with a scalar right operand: dst[i] = lhs[i] + rhs.
// Expects a 1D launch covering `elements` threads; out-of-range threads exit
// via the bounds check. dst and lhs must be device pointers; rhs is by value.
__global__ void addArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
// Widen to size_t before the multiply so huge grids cannot overflow the
// unsigned-int intermediate.
const size_t kernelIndex = static_cast<size_t>(blockDim.x) * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] + rhs; }
}
template<typename Destination, typename LHS, typename RHS>
// Element-wise addition with a scalar left operand: dst[i] = lhs + rhs[i].
// Expects a 1D launch covering `elements` threads; out-of-range threads exit
// via the bounds check. dst and rhs must be device pointers; lhs is by value.
__global__ void addArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
// Widen to size_t before the multiply so huge grids cannot overflow the
// unsigned-int intermediate.
const size_t kernelIndex = static_cast<size_t>(blockDim.x) * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs + rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
// Element-wise subtraction: dst[i] = lhs[i] - rhs[i] for i in [0, elements).
// Expects a 1D launch covering `elements` threads; extra threads are masked
// off by the bounds check. All pointers must be device pointers.
__global__ void subArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
// Cast before multiplying: blockDim.x * blockIdx.x is evaluated in unsigned
// int and can wrap for very large grids before widening to size_t.
const size_t kernelIndex = static_cast<size_t>(blockDim.x) * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] - rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
// Element-wise subtraction with a scalar left operand: dst[i] = lhs - rhs[i].
// Expects a 1D launch covering `elements` threads; out-of-range threads exit
// via the bounds check. dst and rhs must be device pointers; lhs is by value.
__global__ void subArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
// Widen to size_t before the multiply so huge grids cannot overflow the
// unsigned-int intermediate.
const size_t kernelIndex = static_cast<size_t>(blockDim.x) * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs - rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
// Element-wise subtraction with a scalar right operand: dst[i] = lhs[i] - rhs.
// Expects a 1D launch covering `elements` threads; out-of-range threads exit
// via the bounds check. dst and lhs must be device pointers; rhs is by value.
__global__ void subArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
// Widen to size_t before the multiply so huge grids cannot overflow the
// unsigned-int intermediate.
const size_t kernelIndex = static_cast<size_t>(blockDim.x) * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] - rhs; }
}
template<typename Destination, typename LHS, typename RHS>
// Element-wise multiplication: dst[i] = lhs[i] * rhs[i] for i in [0, elements).
// Expects a 1D launch covering `elements` threads; extra threads are masked
// off by the bounds check. All pointers must be device pointers.
__global__ void mulArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
// Cast before multiplying: blockDim.x * blockIdx.x is evaluated in unsigned
// int and can wrap for very large grids before widening to size_t.
const size_t kernelIndex = static_cast<size_t>(blockDim.x) * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] * rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
// Element-wise multiplication with a scalar left operand: dst[i] = lhs * rhs[i].
// Expects a 1D launch covering `elements` threads; out-of-range threads exit
// via the bounds check. dst and rhs must be device pointers; lhs is by value.
__global__ void mulArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
// Widen to size_t before the multiply so huge grids cannot overflow the
// unsigned-int intermediate.
const size_t kernelIndex = static_cast<size_t>(blockDim.x) * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs * rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
// Element-wise multiplication with a scalar right operand: dst[i] = lhs[i] * rhs.
// Expects a 1D launch covering `elements` threads; out-of-range threads exit
// via the bounds check. dst and lhs must be device pointers; rhs is by value.
__global__ void mulArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
// Widen to size_t before the multiply so huge grids cannot overflow the
// unsigned-int intermediate.
const size_t kernelIndex = static_cast<size_t>(blockDim.x) * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] * rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void divArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] / rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void divArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs / rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void divArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] / rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] < rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs < rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] < rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] > rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs > rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] > rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] <= rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] <= rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs <= rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] >= rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs,
RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs >= rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs,
RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] >= rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] == rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs,
RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs == rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs,
RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] == rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseNotEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] != rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseNotEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs,
RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs != rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseNotEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs,
RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] != rhs; }
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | // Note: errors in this file will appear on the wrong line, since we copy another header file
// in to provide some utility functions (the include paths in Jitify are somewhat unreliable)
template<typename Destination, typename LHS, typename RHS>
__global__ void addArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] + rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void addArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] + rhs;
// printf("%d + %d = %d\n", lhs[kernelIndex], rhs, dst[kernelIndex]);
}
}
template<typename Destination, typename LHS, typename RHS>
__global__ void addArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs + rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void subArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] - rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void subArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs - rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void subArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] - rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void mulArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] * rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void mulArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs * rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void mulArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] * rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void divArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] / rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void divArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs / rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void divArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] / rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] < rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs < rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] < rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] > rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs > rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] > rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] <= rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] <= rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs <= rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] >= rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs,
RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs >= rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs,
RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] >= rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] == rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs,
RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs == rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs,
RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] == rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseNotEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] != rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseNotEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs,
RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs != rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseNotEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs,
RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] != rhs; }
} | .file "tmpxft_00179897_00000000-6_arithmetic.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2089:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2089:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2112:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2112:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | // Note: errors in this file will appear on the wrong line, since we copy another header file
// in to provide some utility functions (the include paths in Jitify are somewhat unreliable)
template<typename Destination, typename LHS, typename RHS>
__global__ void addArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] + rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void addArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] + rhs;
// printf("%d + %d = %d\n", lhs[kernelIndex], rhs, dst[kernelIndex]);
}
}
template<typename Destination, typename LHS, typename RHS>
__global__ void addArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs + rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void subArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] - rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void subArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs - rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void subArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] - rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void mulArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] * rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void mulArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs * rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void mulArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] * rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void divArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] / rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void divArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs / rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void divArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] / rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] < rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs < rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] < rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] > rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs > rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] > rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] <= rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] <= rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs <= rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] >= rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs,
RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs >= rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs,
RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] >= rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] == rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs,
RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs == rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs,
RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] == rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseNotEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] != rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseNotEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs,
RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs != rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseNotEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs,
RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] != rhs; }
} | #include <hip/hip_runtime.h>
// Note: errors in this file will appear on the wrong line, since we copy another header file
// in to provide some utility functions (the include paths in Jitify are somewhat unreliable)
template<typename Destination, typename LHS, typename RHS>
__global__ void addArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] + rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void addArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] + rhs;
// printf("%d + %d = %d\n", lhs[kernelIndex], rhs, dst[kernelIndex]);
}
}
template<typename Destination, typename LHS, typename RHS>
__global__ void addArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs + rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void subArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] - rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void subArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs - rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void subArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] - rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void mulArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] * rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void mulArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs * rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void mulArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] * rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void divArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] / rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void divArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs / rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void divArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] / rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] < rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs < rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] < rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] > rhs[kernelIndex]; }
}
// Scalar-vs-array comparison: dst[i] = (lhs > rhs[i]).
// 1D launch, one thread per element, guarded for ragged tails.
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
    // Widen before multiplying to avoid 32-bit wraparound of
    // blockDim.x * blockIdx.x on very large (>2^32-thread) grids.
    const size_t kernelIndex = static_cast<size_t>(blockDim.x) * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) { dst[kernelIndex] = lhs > rhs[kernelIndex]; }
}
// Array-vs-scalar comparison: dst[i] = (lhs[i] > rhs).
// 1D launch, one thread per element, guarded for ragged tails.
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
    // Widen before multiplying to avoid 32-bit wraparound of
    // blockDim.x * blockIdx.x on very large (>2^32-thread) grids.
    const size_t kernelIndex = static_cast<size_t>(blockDim.x) * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] > rhs; }
}
// Element-wise comparison: dst[i] = (lhs[i] <= rhs[i]).
// 1D launch, one thread per element, guarded for ragged tails.
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
    // size_t cast prevents 32-bit overflow of blockDim.x * blockIdx.x on
    // launches with more than 2^32 threads (elements is size_t).
    const size_t kernelIndex = static_cast<size_t>(blockDim.x) * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] <= rhs[kernelIndex]; }
}
// Array-vs-scalar comparison: dst[i] = (lhs[i] <= rhs).
// 1D launch, one thread per element, guarded for ragged tails.
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
    // Widen before multiplying to avoid 32-bit wraparound of
    // blockDim.x * blockIdx.x on very large (>2^32-thread) grids.
    const size_t kernelIndex = static_cast<size_t>(blockDim.x) * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] <= rhs; }
}
// Scalar-vs-array comparison: dst[i] = (lhs <= rhs[i]).
// 1D launch, one thread per element, guarded for ragged tails.
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
    // Widen before multiplying to avoid 32-bit wraparound of
    // blockDim.x * blockIdx.x on very large (>2^32-thread) grids.
    const size_t kernelIndex = static_cast<size_t>(blockDim.x) * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) { dst[kernelIndex] = lhs <= rhs[kernelIndex]; }
}
// Element-wise comparison: dst[i] = (lhs[i] >= rhs[i]).
// 1D launch, one thread per element, guarded for ragged tails.
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
    // size_t cast prevents 32-bit overflow of blockDim.x * blockIdx.x on
    // launches with more than 2^32 threads (elements is size_t).
    const size_t kernelIndex = static_cast<size_t>(blockDim.x) * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] >= rhs[kernelIndex]; }
}
// Scalar-vs-array comparison: dst[i] = (lhs >= rhs[i]).
// 1D launch, one thread per element, guarded for ragged tails.
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs,
                                                RHS *rhs) {
    // Widen before multiplying to avoid 32-bit wraparound of
    // blockDim.x * blockIdx.x on very large (>2^32-thread) grids.
    const size_t kernelIndex = static_cast<size_t>(blockDim.x) * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) { dst[kernelIndex] = lhs >= rhs[kernelIndex]; }
}
// Array-vs-scalar comparison: dst[i] = (lhs[i] >= rhs).
// 1D launch, one thread per element, guarded for ragged tails.
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs,
                                                RHS rhs) {
    // Widen before multiplying to avoid 32-bit wraparound of
    // blockDim.x * blockIdx.x on very large (>2^32-thread) grids.
    const size_t kernelIndex = static_cast<size_t>(blockDim.x) * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] >= rhs; }
}
// Element-wise equality test: dst[i] = (lhs[i] == rhs[i]).
// 1D launch, one thread per element, guarded for ragged tails.
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
    // size_t cast prevents 32-bit overflow of blockDim.x * blockIdx.x on
    // launches with more than 2^32 threads (elements is size_t).
    const size_t kernelIndex = static_cast<size_t>(blockDim.x) * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] == rhs[kernelIndex]; }
}
// Scalar-vs-array equality test: dst[i] = (lhs == rhs[i]).
// 1D launch, one thread per element, guarded for ragged tails.
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs,
                                                RHS *rhs) {
    // Widen before multiplying to avoid 32-bit wraparound of
    // blockDim.x * blockIdx.x on very large (>2^32-thread) grids.
    const size_t kernelIndex = static_cast<size_t>(blockDim.x) * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) { dst[kernelIndex] = lhs == rhs[kernelIndex]; }
}
// Array-vs-scalar equality test: dst[i] = (lhs[i] == rhs).
// 1D launch, one thread per element, guarded for ragged tails.
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs,
                                                RHS rhs) {
    // Widen before multiplying to avoid 32-bit wraparound of
    // blockDim.x * blockIdx.x on very large (>2^32-thread) grids.
    const size_t kernelIndex = static_cast<size_t>(blockDim.x) * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] == rhs; }
}
// Element-wise inequality test: dst[i] = (lhs[i] != rhs[i]).
// 1D launch, one thread per element, guarded for ragged tails.
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseNotEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
    // size_t cast prevents 32-bit overflow of blockDim.x * blockIdx.x on
    // launches with more than 2^32 threads (elements is size_t).
    const size_t kernelIndex = static_cast<size_t>(blockDim.x) * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] != rhs[kernelIndex]; }
}
// Scalar-vs-array inequality test: dst[i] = (lhs != rhs[i]).
// 1D launch, one thread per element, guarded for ragged tails.
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseNotEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs,
                                                   RHS *rhs) {
    // Widen before multiplying to avoid 32-bit wraparound of
    // blockDim.x * blockIdx.x on very large (>2^32-thread) grids.
    const size_t kernelIndex = static_cast<size_t>(blockDim.x) * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) { dst[kernelIndex] = lhs != rhs[kernelIndex]; }
}
// Array-vs-scalar inequality test: dst[i] = (lhs[i] != rhs).
// 1D launch, one thread per element, guarded for ragged tails.
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseNotEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs,
                                                   RHS rhs) {
    // Widen before multiplying to avoid 32-bit wraparound of
    // blockDim.x * blockIdx.x on very large (>2^32-thread) grids.
    const size_t kernelIndex = static_cast<size_t>(blockDim.x) * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] != rhs; }
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
// Note: errors in this file will appear on the wrong line, since we copy another header file
// in to provide some utility functions (the include paths in Jitify are somewhat unreliable)
template<typename Destination, typename LHS, typename RHS>
__global__ void addArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] + rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void addArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] + rhs;
// printf("%d + %d = %d\n", lhs[kernelIndex], rhs, dst[kernelIndex]);
}
}
template<typename Destination, typename LHS, typename RHS>
__global__ void addArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs + rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void subArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] - rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void subArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs - rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void subArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] - rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void mulArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] * rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void mulArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs * rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void mulArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] * rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void divArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] / rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void divArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs / rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void divArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] / rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] < rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs < rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] < rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] > rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs > rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] > rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] <= rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] <= rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs <= rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] >= rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs,
RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs >= rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs,
RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] >= rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] == rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs,
RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs == rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs,
RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] == rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseNotEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] != rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseNotEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs,
RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs != rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseNotEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs,
RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] != rhs; }
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
// Note: errors in this file will appear on the wrong line, since we copy another header file
// in to provide some utility functions (the include paths in Jitify are somewhat unreliable)
template<typename Destination, typename LHS, typename RHS>
__global__ void addArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] + rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void addArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] + rhs;
// printf("%d + %d = %d\n", lhs[kernelIndex], rhs, dst[kernelIndex]);
}
}
template<typename Destination, typename LHS, typename RHS>
__global__ void addArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs + rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void subArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] - rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void subArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs - rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void subArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] - rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void mulArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] * rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void mulArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs * rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void mulArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] * rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void divArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] / rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void divArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs / rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void divArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] / rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] < rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs < rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] < rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] > rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs > rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] > rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] <= rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] <= rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs <= rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] >= rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs,
RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs >= rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs,
RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] >= rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] == rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs,
RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs == rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs,
RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] == rhs; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseNotEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] != rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseNotEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs,
RHS *rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs != rhs[kernelIndex]; }
}
template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseNotEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs,
RHS rhs) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = lhs[kernelIndex] != rhs; }
} | .text
.file "arithmetic.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00179897_00000000-6_arithmetic.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2089:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2089:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2112:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2112:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "arithmetic.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "kernels.hh"
#include "conv2d.hh"
#include "matmul.hh"
#include "sigmoid.hh"
#include "softmax.hh"
#include "relu.hh"
#include "sum.hh"
#include "update.hh"
namespace gpu
{
/**
* blockDim: number of threads in a block
* gridDim: number of blocks in a grid
* blockIdx: current block index in the grid
* threadIdx: current thread index in the block
*
* call<NB_BLOCKS, NB_THREADS_PER_BLOCK>
*/
kernel_f kernels_list[512] = {
kernel_mat_mat_mul,
kernel_mat_rvect_add,
kernel_sigmoid,
kernel_mse,
kernel_softmax,
kernel_log_softmax,
kernel_softmax_cross_entropy,
kernel_conv2d,
kernel_relu,
kernel_relu_leaky,
kernel_tanh,
kernel_mse_grad,
kernel_sigmoid_grad,
kernel_mat_mul_add,
kernel_tmat_mat_mul,
kernel_mat_tmat_mul,
kernel_mat_sum_rows,
kernel_mat_sum_cols,
kernel_softmax_cross_entropy_grad,
kernel_relu_grad,
nullptr,//kernel_conv2d_bias_add,
kernel_update,
kernel_sigmoid_cross_entropy,
kernel_sigmoid_cross_entropy_grad,
kernel_conv2d_input_grad,
kernel_conv2d_kernel_grad,
kernel_argmax_acc,
kernel_moment_update,
kernel_moment_update2,
kernel_adam_update,
kernel_leaky_relu_grad,
nullptr,//kernel_conv2d_bias_add_grad,
kernel_tanh_grad,
kernel_conv2d_transpose,
kernel_conv2d_transpose_input_grad,
kernel_conv2d_transpose_kernel_grad,
kernel_add
};
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "kernels.hh"
#include "conv2d.hh"
#include "matmul.hh"
#include "sigmoid.hh"
#include "softmax.hh"
#include "relu.hh"
#include "sum.hh"
#include "update.hh"
namespace gpu
{
/**
* blockDim: number of threads in a block
* gridDim: number of blocks in a grid
* blockIdx: current block index in the grid
* threadIdx: current thread index in the block
*
* call<NB_BLOCKS, NB_THREADS_PER_BLOCK>
*/
kernel_f kernels_list[512] = {
kernel_mat_mat_mul,
kernel_mat_rvect_add,
kernel_sigmoid,
kernel_mse,
kernel_softmax,
kernel_log_softmax,
kernel_softmax_cross_entropy,
kernel_conv2d,
kernel_relu,
kernel_relu_leaky,
kernel_tanh,
kernel_mse_grad,
kernel_sigmoid_grad,
kernel_mat_mul_add,
kernel_tmat_mat_mul,
kernel_mat_tmat_mul,
kernel_mat_sum_rows,
kernel_mat_sum_cols,
kernel_softmax_cross_entropy_grad,
kernel_relu_grad,
nullptr,//kernel_conv2d_bias_add,
kernel_update,
kernel_sigmoid_cross_entropy,
kernel_sigmoid_cross_entropy_grad,
kernel_conv2d_input_grad,
kernel_conv2d_kernel_grad,
kernel_argmax_acc,
kernel_moment_update,
kernel_moment_update2,
kernel_adam_update,
kernel_leaky_relu_grad,
nullptr,//kernel_conv2d_bias_add_grad,
kernel_tanh_grad,
kernel_conv2d_transpose,
kernel_conv2d_transpose_input_grad,
kernel_conv2d_transpose_kernel_grad,
kernel_add
};
} | .file "tmpxft_00085b54_00000000-6_kernels.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2041:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2041:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2064:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2064:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl _ZN3gpu12kernels_listE
.section .data.rel,"aw"
.align 32
.type _ZN3gpu12kernels_listE, @object
.size _ZN3gpu12kernels_listE, 4096
_ZN3gpu12kernels_listE:
.quad _ZN3gpu18kernel_mat_mat_mulEPN2rt4NodeE
.quad _ZN3gpu20kernel_mat_rvect_addEPN2rt4NodeE
.quad _ZN3gpu14kernel_sigmoidEPN2rt4NodeE
.quad _ZN3gpu10kernel_mseEPN2rt4NodeE
.quad _ZN3gpu14kernel_softmaxEPN2rt4NodeE
.quad _ZN3gpu18kernel_log_softmaxEPN2rt4NodeE
.quad _ZN3gpu28kernel_softmax_cross_entropyEPN2rt4NodeE
.quad _ZN3gpu13kernel_conv2dEPN2rt4NodeE
.quad _ZN3gpu11kernel_reluEPN2rt4NodeE
.quad _ZN3gpu17kernel_relu_leakyEPN2rt4NodeE
.quad _ZN3gpu11kernel_tanhEPN2rt4NodeE
.quad _ZN3gpu15kernel_mse_gradEPN2rt4NodeE
.quad _ZN3gpu19kernel_sigmoid_gradEPN2rt4NodeE
.quad _ZN3gpu18kernel_mat_mul_addEPN2rt4NodeE
.quad _ZN3gpu19kernel_tmat_mat_mulEPN2rt4NodeE
.quad _ZN3gpu19kernel_mat_tmat_mulEPN2rt4NodeE
.quad _ZN3gpu19kernel_mat_sum_rowsEPN2rt4NodeE
.quad _ZN3gpu19kernel_mat_sum_colsEPN2rt4NodeE
.quad _ZN3gpu33kernel_softmax_cross_entropy_gradEPN2rt4NodeE
.quad _ZN3gpu16kernel_relu_gradEPN2rt4NodeE
.quad 0
.quad _ZN3gpu13kernel_updateEPN2rt4NodeE
.quad _ZN3gpu28kernel_sigmoid_cross_entropyEPN2rt4NodeE
.quad _ZN3gpu33kernel_sigmoid_cross_entropy_gradEPN2rt4NodeE
.quad _ZN3gpu24kernel_conv2d_input_gradEPN2rt4NodeE
.quad _ZN3gpu25kernel_conv2d_kernel_gradEPN2rt4NodeE
.quad _ZN3gpu17kernel_argmax_accEPN2rt4NodeE
.quad _ZN3gpu20kernel_moment_updateEPN2rt4NodeE
.quad _ZN3gpu21kernel_moment_update2EPN2rt4NodeE
.quad _ZN3gpu18kernel_adam_updateEPN2rt4NodeE
.quad _ZN3gpu22kernel_leaky_relu_gradEPN2rt4NodeE
.quad 0
.quad _ZN3gpu16kernel_tanh_gradEPN2rt4NodeE
.quad _ZN3gpu23kernel_conv2d_transposeEPN2rt4NodeE
.quad _ZN3gpu34kernel_conv2d_transpose_input_gradEPN2rt4NodeE
.quad _ZN3gpu35kernel_conv2d_transpose_kernel_gradEPN2rt4NodeE
.quad _ZN3gpu10kernel_addEPN2rt4NodeE
.zero 3800
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "kernels.hh"
#include "conv2d.hh"
#include "matmul.hh"
#include "sigmoid.hh"
#include "softmax.hh"
#include "relu.hh"
#include "sum.hh"
#include "update.hh"
namespace gpu
{
/**
* blockDim: number of threads in a block
* gridDim: number of blocks in a grid
* blockIdx: current block index in the grid
* threadIdx: current thread index in the block
*
* call<NB_BLOCKS, NB_THREADS_PER_BLOCK>
*/
kernel_f kernels_list[512] = {
kernel_mat_mat_mul,
kernel_mat_rvect_add,
kernel_sigmoid,
kernel_mse,
kernel_softmax,
kernel_log_softmax,
kernel_softmax_cross_entropy,
kernel_conv2d,
kernel_relu,
kernel_relu_leaky,
kernel_tanh,
kernel_mse_grad,
kernel_sigmoid_grad,
kernel_mat_mul_add,
kernel_tmat_mat_mul,
kernel_mat_tmat_mul,
kernel_mat_sum_rows,
kernel_mat_sum_cols,
kernel_softmax_cross_entropy_grad,
kernel_relu_grad,
nullptr,//kernel_conv2d_bias_add,
kernel_update,
kernel_sigmoid_cross_entropy,
kernel_sigmoid_cross_entropy_grad,
kernel_conv2d_input_grad,
kernel_conv2d_kernel_grad,
kernel_argmax_acc,
kernel_moment_update,
kernel_moment_update2,
kernel_adam_update,
kernel_leaky_relu_grad,
nullptr,//kernel_conv2d_bias_add_grad,
kernel_tanh_grad,
kernel_conv2d_transpose,
kernel_conv2d_transpose_input_grad,
kernel_conv2d_transpose_kernel_grad,
kernel_add
};
} | #include <hip/hip_runtime.h>
#include "kernels.hh"
#include "conv2d.hh"
#include "matmul.hh"
#include "sigmoid.hh"
#include "softmax.hh"
#include "relu.hh"
#include "sum.hh"
#include "update.hh"
namespace gpu
{
/**
* blockDim: number of threads in a block
* gridDim: number of blocks in a grid
* blockIdx: current block index in the grid
* threadIdx: current thread index in the block
*
* call<NB_BLOCKS, NB_THREADS_PER_BLOCK>
*/
kernel_f kernels_list[512] = {
kernel_mat_mat_mul,
kernel_mat_rvect_add,
kernel_sigmoid,
kernel_mse,
kernel_softmax,
kernel_log_softmax,
kernel_softmax_cross_entropy,
kernel_conv2d,
kernel_relu,
kernel_relu_leaky,
kernel_tanh,
kernel_mse_grad,
kernel_sigmoid_grad,
kernel_mat_mul_add,
kernel_tmat_mat_mul,
kernel_mat_tmat_mul,
kernel_mat_sum_rows,
kernel_mat_sum_cols,
kernel_softmax_cross_entropy_grad,
kernel_relu_grad,
nullptr,//kernel_conv2d_bias_add,
kernel_update,
kernel_sigmoid_cross_entropy,
kernel_sigmoid_cross_entropy_grad,
kernel_conv2d_input_grad,
kernel_conv2d_kernel_grad,
kernel_argmax_acc,
kernel_moment_update,
kernel_moment_update2,
kernel_adam_update,
kernel_leaky_relu_grad,
nullptr,//kernel_conv2d_bias_add_grad,
kernel_tanh_grad,
kernel_conv2d_transpose,
kernel_conv2d_transpose_input_grad,
kernel_conv2d_transpose_kernel_grad,
kernel_add
};
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "kernels.hh"
#include "conv2d.hh"
#include "matmul.hh"
#include "sigmoid.hh"
#include "softmax.hh"
#include "relu.hh"
#include "sum.hh"
#include "update.hh"
namespace gpu
{
/**
* blockDim: number of threads in a block
* gridDim: number of blocks in a grid
* blockIdx: current block index in the grid
* threadIdx: current thread index in the block
*
* call<NB_BLOCKS, NB_THREADS_PER_BLOCK>
*/
kernel_f kernels_list[512] = {
kernel_mat_mat_mul,
kernel_mat_rvect_add,
kernel_sigmoid,
kernel_mse,
kernel_softmax,
kernel_log_softmax,
kernel_softmax_cross_entropy,
kernel_conv2d,
kernel_relu,
kernel_relu_leaky,
kernel_tanh,
kernel_mse_grad,
kernel_sigmoid_grad,
kernel_mat_mul_add,
kernel_tmat_mat_mul,
kernel_mat_tmat_mul,
kernel_mat_sum_rows,
kernel_mat_sum_cols,
kernel_softmax_cross_entropy_grad,
kernel_relu_grad,
nullptr,//kernel_conv2d_bias_add,
kernel_update,
kernel_sigmoid_cross_entropy,
kernel_sigmoid_cross_entropy_grad,
kernel_conv2d_input_grad,
kernel_conv2d_kernel_grad,
kernel_argmax_acc,
kernel_moment_update,
kernel_moment_update2,
kernel_adam_update,
kernel_leaky_relu_grad,
nullptr,//kernel_conv2d_bias_add_grad,
kernel_tanh_grad,
kernel_conv2d_transpose,
kernel_conv2d_transpose_input_grad,
kernel_conv2d_transpose_kernel_grad,
kernel_add
};
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "kernels.hh"
#include "conv2d.hh"
#include "matmul.hh"
#include "sigmoid.hh"
#include "softmax.hh"
#include "relu.hh"
#include "sum.hh"
#include "update.hh"
namespace gpu
{
/**
* blockDim: number of threads in a block
* gridDim: number of blocks in a grid
* blockIdx: current block index in the grid
* threadIdx: current thread index in the block
*
* call<NB_BLOCKS, NB_THREADS_PER_BLOCK>
*/
kernel_f kernels_list[512] = {
kernel_mat_mat_mul,
kernel_mat_rvect_add,
kernel_sigmoid,
kernel_mse,
kernel_softmax,
kernel_log_softmax,
kernel_softmax_cross_entropy,
kernel_conv2d,
kernel_relu,
kernel_relu_leaky,
kernel_tanh,
kernel_mse_grad,
kernel_sigmoid_grad,
kernel_mat_mul_add,
kernel_tmat_mat_mul,
kernel_mat_tmat_mul,
kernel_mat_sum_rows,
kernel_mat_sum_cols,
kernel_softmax_cross_entropy_grad,
kernel_relu_grad,
nullptr,//kernel_conv2d_bias_add,
kernel_update,
kernel_sigmoid_cross_entropy,
kernel_sigmoid_cross_entropy_grad,
kernel_conv2d_input_grad,
kernel_conv2d_kernel_grad,
kernel_argmax_acc,
kernel_moment_update,
kernel_moment_update2,
kernel_adam_update,
kernel_leaky_relu_grad,
nullptr,//kernel_conv2d_bias_add_grad,
kernel_tanh_grad,
kernel_conv2d_transpose,
kernel_conv2d_transpose_input_grad,
kernel_conv2d_transpose_kernel_grad,
kernel_add
};
} | .text
.file "kernels.hip"
.type _ZN3gpu12kernels_listE,@object # @_ZN3gpu12kernels_listE
.data
.globl _ZN3gpu12kernels_listE
.p2align 4, 0x0
_ZN3gpu12kernels_listE:
.quad _ZN3gpu18kernel_mat_mat_mulEPN2rt4NodeE
.quad _ZN3gpu20kernel_mat_rvect_addEPN2rt4NodeE
.quad _ZN3gpu14kernel_sigmoidEPN2rt4NodeE
.quad _ZN3gpu10kernel_mseEPN2rt4NodeE
.quad _ZN3gpu14kernel_softmaxEPN2rt4NodeE
.quad _ZN3gpu18kernel_log_softmaxEPN2rt4NodeE
.quad _ZN3gpu28kernel_softmax_cross_entropyEPN2rt4NodeE
.quad _ZN3gpu13kernel_conv2dEPN2rt4NodeE
.quad _ZN3gpu11kernel_reluEPN2rt4NodeE
.quad _ZN3gpu17kernel_relu_leakyEPN2rt4NodeE
.quad _ZN3gpu11kernel_tanhEPN2rt4NodeE
.quad _ZN3gpu15kernel_mse_gradEPN2rt4NodeE
.quad _ZN3gpu19kernel_sigmoid_gradEPN2rt4NodeE
.quad _ZN3gpu18kernel_mat_mul_addEPN2rt4NodeE
.quad _ZN3gpu19kernel_tmat_mat_mulEPN2rt4NodeE
.quad _ZN3gpu19kernel_mat_tmat_mulEPN2rt4NodeE
.quad _ZN3gpu19kernel_mat_sum_rowsEPN2rt4NodeE
.quad _ZN3gpu19kernel_mat_sum_colsEPN2rt4NodeE
.quad _ZN3gpu33kernel_softmax_cross_entropy_gradEPN2rt4NodeE
.quad _ZN3gpu16kernel_relu_gradEPN2rt4NodeE
.quad 0
.quad _ZN3gpu13kernel_updateEPN2rt4NodeE
.quad _ZN3gpu28kernel_sigmoid_cross_entropyEPN2rt4NodeE
.quad _ZN3gpu33kernel_sigmoid_cross_entropy_gradEPN2rt4NodeE
.quad _ZN3gpu24kernel_conv2d_input_gradEPN2rt4NodeE
.quad _ZN3gpu25kernel_conv2d_kernel_gradEPN2rt4NodeE
.quad _ZN3gpu17kernel_argmax_accEPN2rt4NodeE
.quad _ZN3gpu20kernel_moment_updateEPN2rt4NodeE
.quad _ZN3gpu21kernel_moment_update2EPN2rt4NodeE
.quad _ZN3gpu18kernel_adam_updateEPN2rt4NodeE
.quad _ZN3gpu22kernel_leaky_relu_gradEPN2rt4NodeE
.quad 0
.quad _ZN3gpu16kernel_tanh_gradEPN2rt4NodeE
.quad _ZN3gpu23kernel_conv2d_transposeEPN2rt4NodeE
.quad _ZN3gpu34kernel_conv2d_transpose_input_gradEPN2rt4NodeE
.quad _ZN3gpu35kernel_conv2d_transpose_kernel_gradEPN2rt4NodeE
.quad _ZN3gpu10kernel_addEPN2rt4NodeE
.zero 3800
.size _ZN3gpu12kernels_listE, 4096
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _ZN3gpu18kernel_mat_mat_mulEPN2rt4NodeE
.addrsig_sym _ZN3gpu20kernel_mat_rvect_addEPN2rt4NodeE
.addrsig_sym _ZN3gpu14kernel_sigmoidEPN2rt4NodeE
.addrsig_sym _ZN3gpu10kernel_mseEPN2rt4NodeE
.addrsig_sym _ZN3gpu14kernel_softmaxEPN2rt4NodeE
.addrsig_sym _ZN3gpu18kernel_log_softmaxEPN2rt4NodeE
.addrsig_sym _ZN3gpu28kernel_softmax_cross_entropyEPN2rt4NodeE
.addrsig_sym _ZN3gpu13kernel_conv2dEPN2rt4NodeE
.addrsig_sym _ZN3gpu11kernel_reluEPN2rt4NodeE
.addrsig_sym _ZN3gpu17kernel_relu_leakyEPN2rt4NodeE
.addrsig_sym _ZN3gpu11kernel_tanhEPN2rt4NodeE
.addrsig_sym _ZN3gpu15kernel_mse_gradEPN2rt4NodeE
.addrsig_sym _ZN3gpu19kernel_sigmoid_gradEPN2rt4NodeE
.addrsig_sym _ZN3gpu18kernel_mat_mul_addEPN2rt4NodeE
.addrsig_sym _ZN3gpu19kernel_tmat_mat_mulEPN2rt4NodeE
.addrsig_sym _ZN3gpu19kernel_mat_tmat_mulEPN2rt4NodeE
.addrsig_sym _ZN3gpu19kernel_mat_sum_rowsEPN2rt4NodeE
.addrsig_sym _ZN3gpu19kernel_mat_sum_colsEPN2rt4NodeE
.addrsig_sym _ZN3gpu33kernel_softmax_cross_entropy_gradEPN2rt4NodeE
.addrsig_sym _ZN3gpu16kernel_relu_gradEPN2rt4NodeE
.addrsig_sym _ZN3gpu13kernel_updateEPN2rt4NodeE
.addrsig_sym _ZN3gpu28kernel_sigmoid_cross_entropyEPN2rt4NodeE
.addrsig_sym _ZN3gpu33kernel_sigmoid_cross_entropy_gradEPN2rt4NodeE
.addrsig_sym _ZN3gpu24kernel_conv2d_input_gradEPN2rt4NodeE
.addrsig_sym _ZN3gpu25kernel_conv2d_kernel_gradEPN2rt4NodeE
.addrsig_sym _ZN3gpu17kernel_argmax_accEPN2rt4NodeE
.addrsig_sym _ZN3gpu20kernel_moment_updateEPN2rt4NodeE
.addrsig_sym _ZN3gpu21kernel_moment_update2EPN2rt4NodeE
.addrsig_sym _ZN3gpu18kernel_adam_updateEPN2rt4NodeE
.addrsig_sym _ZN3gpu22kernel_leaky_relu_gradEPN2rt4NodeE
.addrsig_sym _ZN3gpu16kernel_tanh_gradEPN2rt4NodeE
.addrsig_sym _ZN3gpu23kernel_conv2d_transposeEPN2rt4NodeE
.addrsig_sym _ZN3gpu34kernel_conv2d_transpose_input_gradEPN2rt4NodeE
.addrsig_sym _ZN3gpu35kernel_conv2d_transpose_kernel_gradEPN2rt4NodeE
.addrsig_sym _ZN3gpu10kernel_addEPN2rt4NodeE
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00085b54_00000000-6_kernels.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2041:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2041:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2064:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2064:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl _ZN3gpu12kernels_listE
.section .data.rel,"aw"
.align 32
.type _ZN3gpu12kernels_listE, @object
.size _ZN3gpu12kernels_listE, 4096
_ZN3gpu12kernels_listE:
.quad _ZN3gpu18kernel_mat_mat_mulEPN2rt4NodeE
.quad _ZN3gpu20kernel_mat_rvect_addEPN2rt4NodeE
.quad _ZN3gpu14kernel_sigmoidEPN2rt4NodeE
.quad _ZN3gpu10kernel_mseEPN2rt4NodeE
.quad _ZN3gpu14kernel_softmaxEPN2rt4NodeE
.quad _ZN3gpu18kernel_log_softmaxEPN2rt4NodeE
.quad _ZN3gpu28kernel_softmax_cross_entropyEPN2rt4NodeE
.quad _ZN3gpu13kernel_conv2dEPN2rt4NodeE
.quad _ZN3gpu11kernel_reluEPN2rt4NodeE
.quad _ZN3gpu17kernel_relu_leakyEPN2rt4NodeE
.quad _ZN3gpu11kernel_tanhEPN2rt4NodeE
.quad _ZN3gpu15kernel_mse_gradEPN2rt4NodeE
.quad _ZN3gpu19kernel_sigmoid_gradEPN2rt4NodeE
.quad _ZN3gpu18kernel_mat_mul_addEPN2rt4NodeE
.quad _ZN3gpu19kernel_tmat_mat_mulEPN2rt4NodeE
.quad _ZN3gpu19kernel_mat_tmat_mulEPN2rt4NodeE
.quad _ZN3gpu19kernel_mat_sum_rowsEPN2rt4NodeE
.quad _ZN3gpu19kernel_mat_sum_colsEPN2rt4NodeE
.quad _ZN3gpu33kernel_softmax_cross_entropy_gradEPN2rt4NodeE
.quad _ZN3gpu16kernel_relu_gradEPN2rt4NodeE
.quad 0
.quad _ZN3gpu13kernel_updateEPN2rt4NodeE
.quad _ZN3gpu28kernel_sigmoid_cross_entropyEPN2rt4NodeE
.quad _ZN3gpu33kernel_sigmoid_cross_entropy_gradEPN2rt4NodeE
.quad _ZN3gpu24kernel_conv2d_input_gradEPN2rt4NodeE
.quad _ZN3gpu25kernel_conv2d_kernel_gradEPN2rt4NodeE
.quad _ZN3gpu17kernel_argmax_accEPN2rt4NodeE
.quad _ZN3gpu20kernel_moment_updateEPN2rt4NodeE
.quad _ZN3gpu21kernel_moment_update2EPN2rt4NodeE
.quad _ZN3gpu18kernel_adam_updateEPN2rt4NodeE
.quad _ZN3gpu22kernel_leaky_relu_gradEPN2rt4NodeE
.quad 0
.quad _ZN3gpu16kernel_tanh_gradEPN2rt4NodeE
.quad _ZN3gpu23kernel_conv2d_transposeEPN2rt4NodeE
.quad _ZN3gpu34kernel_conv2d_transpose_input_gradEPN2rt4NodeE
.quad _ZN3gpu35kernel_conv2d_transpose_kernel_gradEPN2rt4NodeE
.quad _ZN3gpu10kernel_addEPN2rt4NodeE
.zero 3800
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "kernels.hip"
.type _ZN3gpu12kernels_listE,@object # @_ZN3gpu12kernels_listE
.data
.globl _ZN3gpu12kernels_listE
.p2align 4, 0x0
_ZN3gpu12kernels_listE:
.quad _ZN3gpu18kernel_mat_mat_mulEPN2rt4NodeE
.quad _ZN3gpu20kernel_mat_rvect_addEPN2rt4NodeE
.quad _ZN3gpu14kernel_sigmoidEPN2rt4NodeE
.quad _ZN3gpu10kernel_mseEPN2rt4NodeE
.quad _ZN3gpu14kernel_softmaxEPN2rt4NodeE
.quad _ZN3gpu18kernel_log_softmaxEPN2rt4NodeE
.quad _ZN3gpu28kernel_softmax_cross_entropyEPN2rt4NodeE
.quad _ZN3gpu13kernel_conv2dEPN2rt4NodeE
.quad _ZN3gpu11kernel_reluEPN2rt4NodeE
.quad _ZN3gpu17kernel_relu_leakyEPN2rt4NodeE
.quad _ZN3gpu11kernel_tanhEPN2rt4NodeE
.quad _ZN3gpu15kernel_mse_gradEPN2rt4NodeE
.quad _ZN3gpu19kernel_sigmoid_gradEPN2rt4NodeE
.quad _ZN3gpu18kernel_mat_mul_addEPN2rt4NodeE
.quad _ZN3gpu19kernel_tmat_mat_mulEPN2rt4NodeE
.quad _ZN3gpu19kernel_mat_tmat_mulEPN2rt4NodeE
.quad _ZN3gpu19kernel_mat_sum_rowsEPN2rt4NodeE
.quad _ZN3gpu19kernel_mat_sum_colsEPN2rt4NodeE
.quad _ZN3gpu33kernel_softmax_cross_entropy_gradEPN2rt4NodeE
.quad _ZN3gpu16kernel_relu_gradEPN2rt4NodeE
.quad 0
.quad _ZN3gpu13kernel_updateEPN2rt4NodeE
.quad _ZN3gpu28kernel_sigmoid_cross_entropyEPN2rt4NodeE
.quad _ZN3gpu33kernel_sigmoid_cross_entropy_gradEPN2rt4NodeE
.quad _ZN3gpu24kernel_conv2d_input_gradEPN2rt4NodeE
.quad _ZN3gpu25kernel_conv2d_kernel_gradEPN2rt4NodeE
.quad _ZN3gpu17kernel_argmax_accEPN2rt4NodeE
.quad _ZN3gpu20kernel_moment_updateEPN2rt4NodeE
.quad _ZN3gpu21kernel_moment_update2EPN2rt4NodeE
.quad _ZN3gpu18kernel_adam_updateEPN2rt4NodeE
.quad _ZN3gpu22kernel_leaky_relu_gradEPN2rt4NodeE
.quad 0
.quad _ZN3gpu16kernel_tanh_gradEPN2rt4NodeE
.quad _ZN3gpu23kernel_conv2d_transposeEPN2rt4NodeE
.quad _ZN3gpu34kernel_conv2d_transpose_input_gradEPN2rt4NodeE
.quad _ZN3gpu35kernel_conv2d_transpose_kernel_gradEPN2rt4NodeE
.quad _ZN3gpu10kernel_addEPN2rt4NodeE
.zero 3800
.size _ZN3gpu12kernels_listE, 4096
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _ZN3gpu18kernel_mat_mat_mulEPN2rt4NodeE
.addrsig_sym _ZN3gpu20kernel_mat_rvect_addEPN2rt4NodeE
.addrsig_sym _ZN3gpu14kernel_sigmoidEPN2rt4NodeE
.addrsig_sym _ZN3gpu10kernel_mseEPN2rt4NodeE
.addrsig_sym _ZN3gpu14kernel_softmaxEPN2rt4NodeE
.addrsig_sym _ZN3gpu18kernel_log_softmaxEPN2rt4NodeE
.addrsig_sym _ZN3gpu28kernel_softmax_cross_entropyEPN2rt4NodeE
.addrsig_sym _ZN3gpu13kernel_conv2dEPN2rt4NodeE
.addrsig_sym _ZN3gpu11kernel_reluEPN2rt4NodeE
.addrsig_sym _ZN3gpu17kernel_relu_leakyEPN2rt4NodeE
.addrsig_sym _ZN3gpu11kernel_tanhEPN2rt4NodeE
.addrsig_sym _ZN3gpu15kernel_mse_gradEPN2rt4NodeE
.addrsig_sym _ZN3gpu19kernel_sigmoid_gradEPN2rt4NodeE
.addrsig_sym _ZN3gpu18kernel_mat_mul_addEPN2rt4NodeE
.addrsig_sym _ZN3gpu19kernel_tmat_mat_mulEPN2rt4NodeE
.addrsig_sym _ZN3gpu19kernel_mat_tmat_mulEPN2rt4NodeE
.addrsig_sym _ZN3gpu19kernel_mat_sum_rowsEPN2rt4NodeE
.addrsig_sym _ZN3gpu19kernel_mat_sum_colsEPN2rt4NodeE
.addrsig_sym _ZN3gpu33kernel_softmax_cross_entropy_gradEPN2rt4NodeE
.addrsig_sym _ZN3gpu16kernel_relu_gradEPN2rt4NodeE
.addrsig_sym _ZN3gpu13kernel_updateEPN2rt4NodeE
.addrsig_sym _ZN3gpu28kernel_sigmoid_cross_entropyEPN2rt4NodeE
.addrsig_sym _ZN3gpu33kernel_sigmoid_cross_entropy_gradEPN2rt4NodeE
.addrsig_sym _ZN3gpu24kernel_conv2d_input_gradEPN2rt4NodeE
.addrsig_sym _ZN3gpu25kernel_conv2d_kernel_gradEPN2rt4NodeE
.addrsig_sym _ZN3gpu17kernel_argmax_accEPN2rt4NodeE
.addrsig_sym _ZN3gpu20kernel_moment_updateEPN2rt4NodeE
.addrsig_sym _ZN3gpu21kernel_moment_update2EPN2rt4NodeE
.addrsig_sym _ZN3gpu18kernel_adam_updateEPN2rt4NodeE
.addrsig_sym _ZN3gpu22kernel_leaky_relu_gradEPN2rt4NodeE
.addrsig_sym _ZN3gpu16kernel_tanh_gradEPN2rt4NodeE
.addrsig_sym _ZN3gpu23kernel_conv2d_transposeEPN2rt4NodeE
.addrsig_sym _ZN3gpu34kernel_conv2d_transpose_input_gradEPN2rt4NodeE
.addrsig_sym _ZN3gpu35kernel_conv2d_transpose_kernel_gradEPN2rt4NodeE
.addrsig_sym _ZN3gpu10kernel_addEPN2rt4NodeE
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
// Performs one step of the hillis and steele algorithm for integers
__global__ void hs_kernel_global(int * d_out, int * d_in, int step, const int ARRAY_SIZE)
{
// setting ID
int myId = threadIdx.x + blockDim.x * blockIdx.x;
// checking if out-of-bounds
if(myId >= ARRAY_SIZE)
{
return;
}
// setting itself
int myVal = d_in[myId];
// finding the number to add, checking out-of-bounds
int myAdd;
if((myId - step)<0)
{
myAdd = 0;
}
else
{
myAdd = d_in[myId-step];
}
// setting output
d_out[myId] = myVal + myAdd;
}
// Performs one step of the hillis and steele algorithm for integers
__global__ void hs_kernel_shared(int * d_out, int * d_in, int step, const int ARRAY_SIZE)
{
// sdate is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
extern __shared__ float sdata[];
// setting ID
int myId = threadIdx.x + blockDim.x * blockIdx.x;
// checking if out-of-bounds
if(myId >= ARRAY_SIZE)
{
return;
}
// setting itself
int myVal = d_in[myId];
// finding the number to add, checking out-of-bounds
int myAdd;
if((myId - step)<0)
{
myAdd = 0;
}
else
{
myAdd = d_in[myId-step];
}
// setting output
d_out[myId] = myVal + myAdd;
}
void hs_kernel_wrapper(int * d_out, int * d_in, const unsigned int ARRAY_SIZE, const unsigned int ARRAY_BYTES, const unsigned int num_threads)
{
// initializing starting variables
unsigned int num_blocks = ((ARRAY_SIZE) / num_threads) + 1;
int step = 1;
// initializing and allocating an "intermediate" value so we don't have to change anything in d_in
int * d_intermediate;
cudaMalloc((void **) &d_intermediate, ARRAY_BYTES);
cudaMemcpy(d_intermediate, d_in, ARRAY_BYTES, cudaMemcpyDeviceToDevice);
int i = 1;
while(step<ARRAY_SIZE) // stops when step is larger than array size, happens at O(log2(ARRAY_SIZE))
{
// for debugging purposes
// printf("round %d: step %d\n", i, step);
// i++;
// one step/kernel at a time to do synchronization across blocks
hs_kernel_global<<<num_blocks, num_threads>>>(d_out, d_intermediate, step, ARRAY_SIZE);
cudaMemcpy(d_intermediate, d_out, ARRAY_BYTES, cudaMemcpyDeviceToDevice);
step <<= 1; // double step size at each iteration
}
cudaFree(d_intermediate);
}
int main(int argc, char **argv)
{
printf("Hillis and Steele ONLINE... \n");
// defining vars
const unsigned int num_threads = 512;
const unsigned int ARRAY_SIZE = 1<<21;
const unsigned int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
printf("defined vars... \n");
printf("ARRAY_SIZE: %d\n", ARRAY_SIZE);
// setting host in
int h_in[ARRAY_SIZE];
int h_out[ARRAY_SIZE];
for(int i = 0; i < ARRAY_SIZE; i++)
{
h_in[i] = 3;
h_out[i] = 0;
}
printf("filled array... \n");
// setting device pointers
int * d_in;
int * d_out;
printf("defined device pointers... \n");
// allocate GPU memory
cudaMalloc((void **) &d_in, ARRAY_BYTES);
cudaMalloc((void **) &d_out, ARRAY_BYTES);
printf("malloc device pointers... \n");
// transfer arrays to GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
printf("copy device pointers... \n");
// setting up time
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// kernel time!!!
cudaEventRecord(start, 0);
for (int i = 0; i < 100; i++)
{
hs_kernel_wrapper(d_out, d_in, ARRAY_SIZE, ARRAY_BYTES, num_threads);
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// calculating time
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
elapsedTime /= 100.0f;
// back to host
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// printing
for(int i = 400; i<408; i++)
{
printf("index %d: count %d\n", i, h_out[i]);
}
printf("average time elapsed: %f\n", elapsedTime);
// free GPU memory allocation
cudaFree(d_in);
cudaFree(d_out);
return 0;
} | code for sm_80
Function : _Z16hs_kernel_sharedPiS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x174], PT ; /* 0x00005d0000007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ IADD3 R4, R0, -c[0x0][0x170], RZ ; /* 0x80005c0000047a10 */
/* 0x000fe20007ffe0ff */
/*0070*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */
/* 0x000fe200078e00ff */
/*0080*/ HFMA2.MMA R9, -RZ, RZ, 0, 0 ; /* 0x00000000ff097435 */
/* 0x000fe200000001ff */
/*0090*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00a0*/ ISETP.GE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fe20003f06270 */
/*00b0*/ IMAD.WIDE R2, R0, R5, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fcc00078e0205 */
/*00c0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000eac000c1e1900 */
/*00d0*/ @P0 IMAD.WIDE R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004040625 */
/* 0x000fca00078e0205 */
/*00e0*/ @P0 LDG.E R9, [R4.64] ; /* 0x0000000404090981 */
/* 0x000ea2000c1e1900 */
/*00f0*/ SHF.R.S32.HI R7, RZ, 0x1f, R0 ; /* 0x0000001fff077819 */
/* 0x000fe40000011400 */
/*0100*/ LEA R6, P0, R0, c[0x0][0x160], 0x2 ; /* 0x0000580000067a11 */
/* 0x000fc800078010ff */
/*0110*/ LEA.HI.X R7, R0, c[0x0][0x164], R7, 0x2, P0 ; /* 0x0000590000077a11 */
/* 0x000fe200000f1407 */
/*0120*/ IMAD.IADD R9, R2, 0x1, R9 ; /* 0x0000000102097824 */
/* 0x004fca00078e0209 */
/*0130*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*0140*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0150*/ BRA 0x150; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z16hs_kernel_globalPiS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x174], PT ; /* 0x00005d0000007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ IADD3 R4, R0, -c[0x0][0x170], RZ ; /* 0x80005c0000047a10 */
/* 0x000fe20007ffe0ff */
/*0070*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */
/* 0x000fe200078e00ff */
/*0080*/ HFMA2.MMA R9, -RZ, RZ, 0, 0 ; /* 0x00000000ff097435 */
/* 0x000fe200000001ff */
/*0090*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00a0*/ ISETP.GE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fe20003f06270 */
/*00b0*/ IMAD.WIDE R2, R0, R5, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fcc00078e0205 */
/*00c0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000eac000c1e1900 */
/*00d0*/ @P0 IMAD.WIDE R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004040625 */
/* 0x000fca00078e0205 */
/*00e0*/ @P0 LDG.E R9, [R4.64] ; /* 0x0000000404090981 */
/* 0x000ea2000c1e1900 */
/*00f0*/ SHF.R.S32.HI R7, RZ, 0x1f, R0 ; /* 0x0000001fff077819 */
/* 0x000fe40000011400 */
/*0100*/ LEA R6, P0, R0, c[0x0][0x160], 0x2 ; /* 0x0000580000067a11 */
/* 0x000fc800078010ff */
/*0110*/ LEA.HI.X R7, R0, c[0x0][0x164], R7, 0x2, P0 ; /* 0x0000590000077a11 */
/* 0x000fe200000f1407 */
/*0120*/ IMAD.IADD R9, R2, 0x1, R9 ; /* 0x0000000102097824 */
/* 0x004fca00078e0209 */
/*0130*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*0140*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0150*/ BRA 0x150; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
// Performs one step of the hillis and steele algorithm for integers
__global__ void hs_kernel_global(int * d_out, int * d_in, int step, const int ARRAY_SIZE)
{
// setting ID
int myId = threadIdx.x + blockDim.x * blockIdx.x;
// checking if out-of-bounds
if(myId >= ARRAY_SIZE)
{
return;
}
// setting itself
int myVal = d_in[myId];
// finding the number to add, checking out-of-bounds
int myAdd;
if((myId - step)<0)
{
myAdd = 0;
}
else
{
myAdd = d_in[myId-step];
}
// setting output
d_out[myId] = myVal + myAdd;
}
// Performs one step of the hillis and steele algorithm for integers
__global__ void hs_kernel_shared(int * d_out, int * d_in, int step, const int ARRAY_SIZE)
{
// sdate is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
extern __shared__ float sdata[];
// setting ID
int myId = threadIdx.x + blockDim.x * blockIdx.x;
// checking if out-of-bounds
if(myId >= ARRAY_SIZE)
{
return;
}
// setting itself
int myVal = d_in[myId];
// finding the number to add, checking out-of-bounds
int myAdd;
if((myId - step)<0)
{
myAdd = 0;
}
else
{
myAdd = d_in[myId-step];
}
// setting output
d_out[myId] = myVal + myAdd;
}
void hs_kernel_wrapper(int * d_out, int * d_in, const unsigned int ARRAY_SIZE, const unsigned int ARRAY_BYTES, const unsigned int num_threads)
{
// initializing starting variables
unsigned int num_blocks = ((ARRAY_SIZE) / num_threads) + 1;
int step = 1;
// initializing and allocating an "intermediate" value so we don't have to change anything in d_in
int * d_intermediate;
cudaMalloc((void **) &d_intermediate, ARRAY_BYTES);
cudaMemcpy(d_intermediate, d_in, ARRAY_BYTES, cudaMemcpyDeviceToDevice);
int i = 1;
while(step<ARRAY_SIZE) // stops when step is larger than array size, happens at O(log2(ARRAY_SIZE))
{
// for debugging purposes
// printf("round %d: step %d\n", i, step);
// i++;
// one step/kernel at a time to do synchronization across blocks
hs_kernel_global<<<num_blocks, num_threads>>>(d_out, d_intermediate, step, ARRAY_SIZE);
cudaMemcpy(d_intermediate, d_out, ARRAY_BYTES, cudaMemcpyDeviceToDevice);
step <<= 1; // double step size at each iteration
}
cudaFree(d_intermediate);
}
int main(int argc, char **argv)
{
printf("Hillis and Steele ONLINE... \n");
// defining vars
const unsigned int num_threads = 512;
const unsigned int ARRAY_SIZE = 1<<21;
const unsigned int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
printf("defined vars... \n");
printf("ARRAY_SIZE: %d\n", ARRAY_SIZE);
// setting host in
int h_in[ARRAY_SIZE];
int h_out[ARRAY_SIZE];
for(int i = 0; i < ARRAY_SIZE; i++)
{
h_in[i] = 3;
h_out[i] = 0;
}
printf("filled array... \n");
// setting device pointers
int * d_in;
int * d_out;
printf("defined device pointers... \n");
// allocate GPU memory
cudaMalloc((void **) &d_in, ARRAY_BYTES);
cudaMalloc((void **) &d_out, ARRAY_BYTES);
printf("malloc device pointers... \n");
// transfer arrays to GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
printf("copy device pointers... \n");
// setting up time
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// kernel time!!!
cudaEventRecord(start, 0);
for (int i = 0; i < 100; i++)
{
hs_kernel_wrapper(d_out, d_in, ARRAY_SIZE, ARRAY_BYTES, num_threads);
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// calculating time
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
elapsedTime /= 100.0f;
// back to host
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// printing
for(int i = 400; i<408; i++)
{
printf("index %d: count %d\n", i, h_out[i]);
}
printf("average time elapsed: %f\n", elapsedTime);
// free GPU memory allocation
cudaFree(d_in);
cudaFree(d_out);
return 0;
} | .file "tmpxft_0005f41b_00000000-6_hillis_and_steele_scan.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z40__device_stub__Z16hs_kernel_globalPiS_iiPiS_ii
.type _Z40__device_stub__Z16hs_kernel_globalPiS_iiPiS_ii, @function
_Z40__device_stub__Z16hs_kernel_globalPiS_iiPiS_ii:
.LFB2083:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z16hs_kernel_globalPiS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z40__device_stub__Z16hs_kernel_globalPiS_iiPiS_ii, .-_Z40__device_stub__Z16hs_kernel_globalPiS_iiPiS_ii
.globl _Z16hs_kernel_globalPiS_ii
.type _Z16hs_kernel_globalPiS_ii, @function
_Z16hs_kernel_globalPiS_ii:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z40__device_stub__Z16hs_kernel_globalPiS_iiPiS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z16hs_kernel_globalPiS_ii, .-_Z16hs_kernel_globalPiS_ii
.globl _Z17hs_kernel_wrapperPiS_jjj
.type _Z17hs_kernel_wrapperPiS_jjj, @function
_Z17hs_kernel_wrapperPiS_jjj:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $56, %rsp
.cfi_def_cfa_offset 112
movq %rdi, %r12
movq %rsi, %rbx
movl %edx, %ebp
movl %r8d, %r14d
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movl %edx, %eax
movl $0, %edx
divl %r8d
leal 1(%rax), %r15d
movl %ecx, %r13d
leaq 8(%rsp), %rdi
movq %r13, %rsi
call cudaMalloc@PLT
movl $3, %ecx
movq %r13, %rdx
movq %rbx, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
cmpl $1, %ebp
jbe .L12
movl $1, %ebx
jmp .L14
.L13:
movl $3, %ecx
movq %r13, %rdx
movq %r12, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
addl %ebx, %ebx
cmpl %ebp, %ebx
jnb .L12
.L14:
movl %r14d, 28(%rsp)
movl $1, 32(%rsp)
movl %r15d, 16(%rsp)
movl $1, 20(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L13
movl %ebp, %ecx
movl %ebx, %edx
movq 8(%rsp), %rsi
movq %r12, %rdi
call _Z40__device_stub__Z16hs_kernel_globalPiS_iiPiS_ii
jmp .L13
.L12:
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L18
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L18:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z17hs_kernel_wrapperPiS_jjj, .-_Z17hs_kernel_wrapperPiS_jjj
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Hillis and Steele ONLINE... \n"
.LC1:
.string "defined vars... \n"
.LC2:
.string "ARRAY_SIZE: %d\n"
.LC3:
.string "filled array... \n"
.LC4:
.string "defined device pointers... \n"
.LC5:
.string "malloc device pointers... \n"
.LC6:
.string "copy device pointers... \n"
.LC8:
.string "index %d: count %d\n"
.LC9:
.string "average time elapsed: %f\n"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
leaq -16777216(%rsp), %r11
.cfi_def_cfa 11, 16777240
.LPSRL0:
subq $4096, %rsp
orq $0, (%rsp)
cmpq %r11, %rsp
jne .LPSRL0
.cfi_def_cfa_register 7
subq $72, %rsp
.cfi_def_cfa_offset 16777312
movq %fs:40, %rax
movq %rax, 16777272(%rsp)
xorl %eax, %eax
leaq .LC0(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $2097152, %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
.L20:
movl $3, 48(%rsp,%rax)
movl $0, 8388656(%rsp,%rax)
addq $4, %rax
cmpq $8388608, %rax
jne .L20
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 16(%rsp), %rdi
movl $8388608, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $8388608, %esi
call cudaMalloc@PLT
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 48(%rsp), %rsi
movl $1, %ecx
movl $8388608, %edx
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 32(%rsp), %rdi
call cudaEventCreate@PLT
leaq 40(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
movl $100, %ebx
.L21:
movl $512, %r8d
movl $8388608, %ecx
movl $2097152, %edx
movq 16(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z17hs_kernel_wrapperPiS_jjj
subl $1, %ebx
jne .L21
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movq 40(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 12(%rsp), %rdi
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
call cudaEventElapsedTime@PLT
movss 12(%rsp), %xmm0
divss .LC7(%rip), %xmm0
movss %xmm0, 12(%rsp)
leaq 8388656(%rsp), %rdi
movl $2, %ecx
movl $8388608, %edx
movq 24(%rsp), %rsi
call cudaMemcpy@PLT
movl $400, %ebx
leaq .LC8(%rip), %rbp
.L22:
movl 8388656(%rsp,%rbx,4), %ecx
movl %ebx, %edx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $1, %rbx
cmpq $408, %rbx
jne .L22
pxor %xmm0, %xmm0
cvtss2sd 12(%rsp), %xmm0
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 16777272(%rsp), %rax
subq %fs:40, %rax
jne .L28
movl $0, %eax
addq $16777288, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L28:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.globl _Z40__device_stub__Z16hs_kernel_sharedPiS_iiPiS_ii
.type _Z40__device_stub__Z16hs_kernel_sharedPiS_iiPiS_ii, @function
_Z40__device_stub__Z16hs_kernel_sharedPiS_iiPiS_ii:
.LFB2085:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L33
.L29:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L34
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L33:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z16hs_kernel_sharedPiS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L29
.L34:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z40__device_stub__Z16hs_kernel_sharedPiS_iiPiS_ii, .-_Z40__device_stub__Z16hs_kernel_sharedPiS_iiPiS_ii
.globl _Z16hs_kernel_sharedPiS_ii
.type _Z16hs_kernel_sharedPiS_ii, @function
_Z16hs_kernel_sharedPiS_ii:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z40__device_stub__Z16hs_kernel_sharedPiS_iiPiS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z16hs_kernel_sharedPiS_ii, .-_Z16hs_kernel_sharedPiS_ii
.section .rodata.str1.1
.LC10:
.string "_Z16hs_kernel_sharedPiS_ii"
.LC11:
.string "_Z16hs_kernel_globalPiS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2088:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _Z16hs_kernel_sharedPiS_ii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _Z16hs_kernel_globalPiS_ii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC7:
.long 1120403456
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
// Performs one step of the hillis and steele algorithm for integers
__global__ void hs_kernel_global(int * d_out, int * d_in, int step, const int ARRAY_SIZE)
{
// setting ID
int myId = threadIdx.x + blockDim.x * blockIdx.x;
// checking if out-of-bounds
if(myId >= ARRAY_SIZE)
{
return;
}
// setting itself
int myVal = d_in[myId];
// finding the number to add, checking out-of-bounds
int myAdd;
if((myId - step)<0)
{
myAdd = 0;
}
else
{
myAdd = d_in[myId-step];
}
// setting output
d_out[myId] = myVal + myAdd;
}
// Performs one step of the hillis and steele algorithm for integers
__global__ void hs_kernel_shared(int * d_out, int * d_in, int step, const int ARRAY_SIZE)
{
// sdate is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
extern __shared__ float sdata[];
// setting ID
int myId = threadIdx.x + blockDim.x * blockIdx.x;
// checking if out-of-bounds
if(myId >= ARRAY_SIZE)
{
return;
}
// setting itself
int myVal = d_in[myId];
// finding the number to add, checking out-of-bounds
int myAdd;
if((myId - step)<0)
{
myAdd = 0;
}
else
{
myAdd = d_in[myId-step];
}
// setting output
d_out[myId] = myVal + myAdd;
}
void hs_kernel_wrapper(int * d_out, int * d_in, const unsigned int ARRAY_SIZE, const unsigned int ARRAY_BYTES, const unsigned int num_threads)
{
// initializing starting variables
unsigned int num_blocks = ((ARRAY_SIZE) / num_threads) + 1;
int step = 1;
// initializing and allocating an "intermediate" value so we don't have to change anything in d_in
int * d_intermediate;
cudaMalloc((void **) &d_intermediate, ARRAY_BYTES);
cudaMemcpy(d_intermediate, d_in, ARRAY_BYTES, cudaMemcpyDeviceToDevice);
int i = 1;
while(step<ARRAY_SIZE) // stops when step is larger than array size, happens at O(log2(ARRAY_SIZE))
{
// for debugging purposes
// printf("round %d: step %d\n", i, step);
// i++;
// one step/kernel at a time to do synchronization across blocks
hs_kernel_global<<<num_blocks, num_threads>>>(d_out, d_intermediate, step, ARRAY_SIZE);
cudaMemcpy(d_intermediate, d_out, ARRAY_BYTES, cudaMemcpyDeviceToDevice);
step <<= 1; // double step size at each iteration
}
cudaFree(d_intermediate);
}
int main(int argc, char **argv)
{
printf("Hillis and Steele ONLINE... \n");
// defining vars
const unsigned int num_threads = 512;
const unsigned int ARRAY_SIZE = 1<<21;
const unsigned int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
printf("defined vars... \n");
printf("ARRAY_SIZE: %d\n", ARRAY_SIZE);
// setting host in
int h_in[ARRAY_SIZE];
int h_out[ARRAY_SIZE];
for(int i = 0; i < ARRAY_SIZE; i++)
{
h_in[i] = 3;
h_out[i] = 0;
}
printf("filled array... \n");
// setting device pointers
int * d_in;
int * d_out;
printf("defined device pointers... \n");
// allocate GPU memory
cudaMalloc((void **) &d_in, ARRAY_BYTES);
cudaMalloc((void **) &d_out, ARRAY_BYTES);
printf("malloc device pointers... \n");
// transfer arrays to GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
printf("copy device pointers... \n");
// setting up time
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// kernel time!!!
cudaEventRecord(start, 0);
for (int i = 0; i < 100; i++)
{
hs_kernel_wrapper(d_out, d_in, ARRAY_SIZE, ARRAY_BYTES, num_threads);
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// calculating time
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
elapsedTime /= 100.0f;
// back to host
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// printing
for(int i = 400; i<408; i++)
{
printf("index %d: count %d\n", i, h_out[i]);
}
printf("average time elapsed: %f\n", elapsedTime);
// free GPU memory allocation
cudaFree(d_in);
cudaFree(d_out);
return 0;
} | #include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
// Performs one step of the hillis and steele algorithm for integers
__global__ void hs_kernel_global(int * d_out, int * d_in, int step, const int ARRAY_SIZE)
{
// setting ID
int myId = threadIdx.x + blockDim.x * blockIdx.x;
// checking if out-of-bounds
if(myId >= ARRAY_SIZE)
{
return;
}
// setting itself
int myVal = d_in[myId];
// finding the number to add, checking out-of-bounds
int myAdd;
if((myId - step)<0)
{
myAdd = 0;
}
else
{
myAdd = d_in[myId-step];
}
// setting output
d_out[myId] = myVal + myAdd;
}
// Performs one step of the hillis and steele algorithm for integers
__global__ void hs_kernel_shared(int * d_out, int * d_in, int step, const int ARRAY_SIZE)
{
// sdate is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
extern __shared__ float sdata[];
// setting ID
int myId = threadIdx.x + blockDim.x * blockIdx.x;
// checking if out-of-bounds
if(myId >= ARRAY_SIZE)
{
return;
}
// setting itself
int myVal = d_in[myId];
// finding the number to add, checking out-of-bounds
int myAdd;
if((myId - step)<0)
{
myAdd = 0;
}
else
{
myAdd = d_in[myId-step];
}
// setting output
d_out[myId] = myVal + myAdd;
}
void hs_kernel_wrapper(int * d_out, int * d_in, const unsigned int ARRAY_SIZE, const unsigned int ARRAY_BYTES, const unsigned int num_threads)
{
// initializing starting variables
unsigned int num_blocks = ((ARRAY_SIZE) / num_threads) + 1;
int step = 1;
// initializing and allocating an "intermediate" value so we don't have to change anything in d_in
int * d_intermediate;
hipMalloc((void **) &d_intermediate, ARRAY_BYTES);
hipMemcpy(d_intermediate, d_in, ARRAY_BYTES, hipMemcpyDeviceToDevice);
int i = 1;
while(step<ARRAY_SIZE) // stops when step is larger than array size, happens at O(log2(ARRAY_SIZE))
{
// for debugging purposes
// printf("round %d: step %d\n", i, step);
// i++;
// one step/kernel at a time to do synchronization across blocks
hs_kernel_global<<<num_blocks, num_threads>>>(d_out, d_intermediate, step, ARRAY_SIZE);
hipMemcpy(d_intermediate, d_out, ARRAY_BYTES, hipMemcpyDeviceToDevice);
step <<= 1; // double step size at each iteration
}
hipFree(d_intermediate);
}
int main(int argc, char **argv)
{
printf("Hillis and Steele ONLINE... \n");
// defining vars
const unsigned int num_threads = 512;
const unsigned int ARRAY_SIZE = 1<<21;
const unsigned int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
printf("defined vars... \n");
printf("ARRAY_SIZE: %d\n", ARRAY_SIZE);
// setting host in
int h_in[ARRAY_SIZE];
int h_out[ARRAY_SIZE];
for(int i = 0; i < ARRAY_SIZE; i++)
{
h_in[i] = 3;
h_out[i] = 0;
}
printf("filled array... \n");
// setting device pointers
int * d_in;
int * d_out;
printf("defined device pointers... \n");
// allocate GPU memory
hipMalloc((void **) &d_in, ARRAY_BYTES);
hipMalloc((void **) &d_out, ARRAY_BYTES);
printf("malloc device pointers... \n");
// transfer arrays to GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
printf("copy device pointers... \n");
// setting up time
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// kernel time!!!
hipEventRecord(start, 0);
for (int i = 0; i < 100; i++)
{
hs_kernel_wrapper(d_out, d_in, ARRAY_SIZE, ARRAY_BYTES, num_threads);
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// calculating time
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
elapsedTime /= 100.0f;
// back to host
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
// printing
for(int i = 400; i<408; i++)
{
printf("index %d: count %d\n", i, h_out[i]);
}
printf("average time elapsed: %f\n", elapsedTime);
// free GPU memory allocation
hipFree(d_in);
hipFree(d_out);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
// Performs one step of the hillis and steele algorithm for integers
__global__ void hs_kernel_global(int * d_out, int * d_in, int step, const int ARRAY_SIZE)
{
// setting ID
int myId = threadIdx.x + blockDim.x * blockIdx.x;
// checking if out-of-bounds
if(myId >= ARRAY_SIZE)
{
return;
}
// setting itself
int myVal = d_in[myId];
// finding the number to add, checking out-of-bounds
int myAdd;
if((myId - step)<0)
{
myAdd = 0;
}
else
{
myAdd = d_in[myId-step];
}
// setting output
d_out[myId] = myVal + myAdd;
}
// Performs one step of the hillis and steele algorithm for integers
__global__ void hs_kernel_shared(int * d_out, int * d_in, int step, const int ARRAY_SIZE)
{
// sdate is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
extern __shared__ float sdata[];
// setting ID
int myId = threadIdx.x + blockDim.x * blockIdx.x;
// checking if out-of-bounds
if(myId >= ARRAY_SIZE)
{
return;
}
// setting itself
int myVal = d_in[myId];
// finding the number to add, checking out-of-bounds
int myAdd;
if((myId - step)<0)
{
myAdd = 0;
}
else
{
myAdd = d_in[myId-step];
}
// setting output
d_out[myId] = myVal + myAdd;
}
void hs_kernel_wrapper(int * d_out, int * d_in, const unsigned int ARRAY_SIZE, const unsigned int ARRAY_BYTES, const unsigned int num_threads)
{
// initializing starting variables
unsigned int num_blocks = ((ARRAY_SIZE) / num_threads) + 1;
int step = 1;
// initializing and allocating an "intermediate" value so we don't have to change anything in d_in
int * d_intermediate;
hipMalloc((void **) &d_intermediate, ARRAY_BYTES);
hipMemcpy(d_intermediate, d_in, ARRAY_BYTES, hipMemcpyDeviceToDevice);
int i = 1;
while(step<ARRAY_SIZE) // stops when step is larger than array size, happens at O(log2(ARRAY_SIZE))
{
// for debugging purposes
// printf("round %d: step %d\n", i, step);
// i++;
// one step/kernel at a time to do synchronization across blocks
hs_kernel_global<<<num_blocks, num_threads>>>(d_out, d_intermediate, step, ARRAY_SIZE);
hipMemcpy(d_intermediate, d_out, ARRAY_BYTES, hipMemcpyDeviceToDevice);
step <<= 1; // double step size at each iteration
}
hipFree(d_intermediate);
}
int main(int argc, char **argv)
{
printf("Hillis and Steele ONLINE... \n");
// defining vars
const unsigned int num_threads = 512;
const unsigned int ARRAY_SIZE = 1<<21;
const unsigned int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
printf("defined vars... \n");
printf("ARRAY_SIZE: %d\n", ARRAY_SIZE);
// setting host in
int h_in[ARRAY_SIZE];
int h_out[ARRAY_SIZE];
for(int i = 0; i < ARRAY_SIZE; i++)
{
h_in[i] = 3;
h_out[i] = 0;
}
printf("filled array... \n");
// setting device pointers
int * d_in;
int * d_out;
printf("defined device pointers... \n");
// allocate GPU memory
hipMalloc((void **) &d_in, ARRAY_BYTES);
hipMalloc((void **) &d_out, ARRAY_BYTES);
printf("malloc device pointers... \n");
// transfer arrays to GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
printf("copy device pointers... \n");
// setting up time
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// kernel time!!!
hipEventRecord(start, 0);
for (int i = 0; i < 100; i++)
{
hs_kernel_wrapper(d_out, d_in, ARRAY_SIZE, ARRAY_BYTES, num_threads);
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// calculating time
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
elapsedTime /= 100.0f;
// back to host
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
// printing
for(int i = 400; i<408; i++)
{
printf("index %d: count %d\n", i, h_out[i]);
}
printf("average time elapsed: %f\n", elapsedTime);
// free GPU memory allocation
hipFree(d_in);
hipFree(d_out);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z16hs_kernel_globalPiS_ii
.globl _Z16hs_kernel_globalPiS_ii
.p2align 8
.type _Z16hs_kernel_globalPiS_ii,@function
_Z16hs_kernel_globalPiS_ii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x14
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v2
s_cbranch_execz .LBB0_4
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x8
s_load_b32 s4, s[0:1], 0x10
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v1, vcc_lo
v_subrev_nc_u32_e32 v2, s4, v2
s_mov_b32 s4, exec_lo
global_load_b32 v4, v[3:4], off
v_mov_b32_e32 v3, 0
v_cmpx_lt_i32_e32 -1, v2
s_cbranch_execz .LBB0_3
v_mov_b32_e32 v3, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[2:3]
v_add_co_u32 v2, vcc_lo, s2, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
global_load_b32 v3, v[2:3], off
.LBB0_3:
s_or_b32 exec_lo, exec_lo, s4
s_load_b64 s[0:1], s[0:1], 0x0
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v4
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16hs_kernel_globalPiS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z16hs_kernel_globalPiS_ii, .Lfunc_end0-_Z16hs_kernel_globalPiS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z16hs_kernel_sharedPiS_ii
.globl _Z16hs_kernel_sharedPiS_ii
.p2align 8
.type _Z16hs_kernel_sharedPiS_ii,@function
_Z16hs_kernel_sharedPiS_ii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x14
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v2
s_cbranch_execz .LBB1_4
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x8
s_load_b32 s4, s[0:1], 0x10
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v1, vcc_lo
v_subrev_nc_u32_e32 v2, s4, v2
s_mov_b32 s4, exec_lo
global_load_b32 v4, v[3:4], off
v_mov_b32_e32 v3, 0
v_cmpx_lt_i32_e32 -1, v2
s_cbranch_execz .LBB1_3
v_mov_b32_e32 v3, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[2:3]
v_add_co_u32 v2, vcc_lo, s2, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
global_load_b32 v3, v[2:3], off
.LBB1_3:
s_or_b32 exec_lo, exec_lo, s4
s_load_b64 s[0:1], s[0:1], 0x0
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v4
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB1_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16hs_kernel_sharedPiS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z16hs_kernel_sharedPiS_ii, .Lfunc_end1-_Z16hs_kernel_sharedPiS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16hs_kernel_globalPiS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z16hs_kernel_globalPiS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16hs_kernel_sharedPiS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z16hs_kernel_sharedPiS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
// One step of the Hillis-Steele inclusive scan over integers, out-of-place.
// Each thread i writes d_out[i] = d_in[i] + d_in[i - step], with the
// left-hand term taken as 0 when i - step falls before the array.
// Expects a 1-D launch covering at least ARRAY_SIZE threads; extra threads
// simply exit via the bounds guard.
__global__ void hs_kernel_global(int * d_out, int * d_in, int step, const int ARRAY_SIZE)
{
	// Flat 1-D global thread index.
	const int gid = blockIdx.x * blockDim.x + threadIdx.x;
	// Guard: the grid may overshoot the array at the tail.
	if (gid >= ARRAY_SIZE)
	{
		return;
	}
	// Partner element 'step' positions to the left contributes 0 when it
	// would fall off the front (gid - step < 0  <=>  gid < step here,
	// since step >= 1 and gid >= 0).
	const int partner = (gid < step) ? 0 : d_in[gid - step];
	d_out[gid] = d_in[gid] + partner;
}
// Performs one step of the hillis and steele algorithm for integers.
// NOTE(review): despite the name, this kernel is byte-for-byte the same
// logic as hs_kernel_global -- the shared-memory buffer below is declared
// but never read or written, so the dynamic shared allocation is wasted.
// Either implement the shared-memory tiling or drop this kernel.
__global__ void hs_kernel_shared(int * d_out, int * d_in, int step, const int ARRAY_SIZE)
{
    // sdata is sized by the 3rd launch argument <<<b, t, shmem>>>;
    // currently UNUSED (see note above).
    extern __shared__ float sdata[];
    // Flat 1-D global thread index.
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    // Bounds guard: the grid may overshoot the array at the tail.
    if(myId >= ARRAY_SIZE)
    {
        return;
    }
    // This thread's own input element.
    int myVal = d_in[myId];
    // Partner element 'step' positions to the left; 0 when it would fall
    // off the front of the array.
    int myAdd;
    if((myId - step)<0)
    {
        myAdd = 0;
    }
    else
    {
        myAdd = d_in[myId-step];
    }
    // Write the partial scan result for this step.
    d_out[myId] = myVal + myAdd;
}
// Runs the complete Hillis and Steele inclusive scan on device data.
//
// Parameters:
//   d_out       - device buffer receiving the scan result (ARRAY_BYTES bytes)
//   d_in        - device input buffer; left unmodified
//   ARRAY_SIZE  - number of int elements in each buffer
//   ARRAY_BYTES - byte size of each buffer (ARRAY_SIZE * sizeof(int))
//   num_threads - threads per block for every kernel launch
//
// Launches hs_kernel_global once per step; the step doubles each round, so
// the loop runs O(log2(ARRAY_SIZE)) times. The implicit barrier between
// kernel launches provides the cross-block synchronization the algorithm
// needs. Exits the process on any HIP API failure.
void hs_kernel_wrapper(int * d_out, int * d_in, const unsigned int ARRAY_SIZE, const unsigned int ARRAY_BYTES, const unsigned int num_threads)
{
	// Round up so a partial tail block still covers the last elements.
	const unsigned int num_blocks = (ARRAY_SIZE / num_threads) + 1;
	unsigned int step = 1; // unsigned: avoids signed/unsigned mismatch vs ARRAY_SIZE
	// Scratch copy of the input so d_in itself is never overwritten.
	int * d_intermediate = NULL;
	hipError_t err = hipMalloc((void **) &d_intermediate, ARRAY_BYTES);
	if (err != hipSuccess) {
		fprintf(stderr, "hipMalloc failed: %s\n", hipGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	err = hipMemcpy(d_intermediate, d_in, ARRAY_BYTES, hipMemcpyDeviceToDevice);
	if (err != hipSuccess) {
		fprintf(stderr, "hipMemcpy failed: %s\n", hipGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	while (step < ARRAY_SIZE) // O(log2(ARRAY_SIZE)) rounds
	{
		// One step per launch to synchronize across blocks.
		hs_kernel_global<<<num_blocks, num_threads>>>(d_out, d_intermediate, (int)step, ARRAY_SIZE);
		err = hipGetLastError(); // catch bad launch configuration
		if (err != hipSuccess) {
			fprintf(stderr, "kernel launch failed: %s\n", hipGetErrorString(err));
			exit(EXIT_FAILURE);
		}
		// Feed this step's output back in as the next step's input.
		err = hipMemcpy(d_intermediate, d_out, ARRAY_BYTES, hipMemcpyDeviceToDevice);
		if (err != hipSuccess) {
			fprintf(stderr, "hipMemcpy failed: %s\n", hipGetErrorString(err));
			exit(EXIT_FAILURE);
		}
		step <<= 1; // double step size at each iteration
	}
	hipFree(d_intermediate);
}
// Driver: fills a 2^21-element array with 3s, runs the Hillis-Steele scan
// 100 times for timing, prints a sample of the result and the average
// per-run time. Returns 0 on success, EXIT_FAILURE on host allocation error.
int main(int argc, char **argv)
{
	printf("Hillis and Steele ONLINE... \n");
	// defining vars
	const unsigned int num_threads = 512;
	const unsigned int ARRAY_SIZE = 1<<21;
	const unsigned int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
	printf("defined vars... \n");
	printf("ARRAY_SIZE: %d\n", ARRAY_SIZE);
	// Host buffers are 8 MB each: allocate on the heap. The previous stack
	// arrays needed ~16 MB of stack, which exceeds the usual 8 MB default
	// limit and crashes before main's body runs.
	int * h_in = (int *) malloc(ARRAY_BYTES);
	int * h_out = (int *) malloc(ARRAY_BYTES);
	if (!h_in || !h_out) {
		fprintf(stderr, "host allocation failed\n");
		return EXIT_FAILURE;
	}
	for (unsigned int i = 0; i < ARRAY_SIZE; i++)
	{
		h_in[i] = 3;
		h_out[i] = 0;
	}
	printf("filled array... \n");
	// setting device pointers
	int * d_in;
	int * d_out;
	printf("defined device pointers... \n");
	// allocate GPU memory
	hipMalloc((void **) &d_in, ARRAY_BYTES);
	hipMalloc((void **) &d_out, ARRAY_BYTES);
	printf("malloc device pointers... \n");
	// transfer input array to GPU
	hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
	printf("copy device pointers... \n");
	// events for GPU-side timing
	hipEvent_t start, stop;
	hipEventCreate(&start);
	hipEventCreate(&stop);
	// time 100 full scans and average
	hipEventRecord(start, 0);
	for (int i = 0; i < 100; i++)
	{
		hs_kernel_wrapper(d_out, d_in, ARRAY_SIZE, ARRAY_BYTES, num_threads);
	}
	hipEventRecord(stop, 0);
	hipEventSynchronize(stop);
	// elapsed wall time across all 100 runs, then per-run average
	float elapsedTime;
	hipEventElapsedTime(&elapsedTime, start, stop);
	elapsedTime /= 100.0f;
	// copy result back to host (blocking, so h_out is valid below)
	hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
	// print a small sample of the scan output
	for (int i = 400; i < 408; i++)
	{
		printf("index %d: count %d\n", i, h_out[i]);
	}
	printf("average time elapsed: %f\n", elapsedTime);
	// release events, device buffers, and host buffers
	hipEventDestroy(start);
	hipEventDestroy(stop);
	hipFree(d_in);
	hipFree(d_out);
	free(h_in);
	free(h_out);
	return 0;
}
.file "hillis_and_steele_scan.hip"
.globl _Z31__device_stub__hs_kernel_globalPiS_ii # -- Begin function _Z31__device_stub__hs_kernel_globalPiS_ii
.p2align 4, 0x90
.type _Z31__device_stub__hs_kernel_globalPiS_ii,@function
_Z31__device_stub__hs_kernel_globalPiS_ii: # @_Z31__device_stub__hs_kernel_globalPiS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16hs_kernel_globalPiS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z31__device_stub__hs_kernel_globalPiS_ii, .Lfunc_end0-_Z31__device_stub__hs_kernel_globalPiS_ii
.cfi_endproc
# -- End function
.globl _Z31__device_stub__hs_kernel_sharedPiS_ii # -- Begin function _Z31__device_stub__hs_kernel_sharedPiS_ii
.p2align 4, 0x90
.type _Z31__device_stub__hs_kernel_sharedPiS_ii,@function
_Z31__device_stub__hs_kernel_sharedPiS_ii: # @_Z31__device_stub__hs_kernel_sharedPiS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16hs_kernel_sharedPiS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z31__device_stub__hs_kernel_sharedPiS_ii, .Lfunc_end1-_Z31__device_stub__hs_kernel_sharedPiS_ii
.cfi_endproc
# -- End function
.globl _Z17hs_kernel_wrapperPiS_jjj # -- Begin function _Z17hs_kernel_wrapperPiS_jjj
.p2align 4, 0x90
.type _Z17hs_kernel_wrapperPiS_jjj,@function
_Z17hs_kernel_wrapperPiS_jjj: # @_Z17hs_kernel_wrapperPiS_jjj
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $120, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r8d, %ebp
movl %edx, %ebx
movq %rsi, %r12
movq %rdi, %r14
movl %ecx, %r15d
movq %rsp, %rdi
movq %r15, %rsi
callq hipMalloc
movq (%rsp), %rdi
movq %r12, %rsi
movq %r15, %rdx
movl $3, %ecx
callq hipMemcpy
cmpl $2, %ebx
jb .LBB2_5
# %bb.1: # %.lr.ph
movl %ebx, %eax
xorl %edx, %edx
divl %ebp
movl %eax, %r12d
incl %r12d
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %r12
movl %ebp, %r13d
orq %rax, %r13
movl $1, %ebp
jmp .LBB2_2
.p2align 4, 0x90
.LBB2_4: # in Loop: Header=BB2_2 Depth=1
movq (%rsp), %rdi
movq %r14, %rsi
movq %r15, %rdx
movl $3, %ecx
callq hipMemcpy
addl %ebp, %ebp
cmpl %ebx, %ebp
jae .LBB2_5
.LBB2_2: # =>This Inner Loop Header: Depth=1
movq %r12, %rdi
movl $1, %esi
movq %r13, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_4
# %bb.3: # in Loop: Header=BB2_2 Depth=1
movq (%rsp), %rax
movq %r14, 72(%rsp)
movq %rax, 64(%rsp)
movl %ebp, 12(%rsp)
movl %ebx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movl $_Z16hs_kernel_globalPiS_ii, %edi
leaq 80(%rsp), %r9
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB2_4
.LBB2_5: # %._crit_edge
movq (%rsp), %rdi
callq hipFree
addq $120, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z17hs_kernel_wrapperPiS_jjj, .Lfunc_end2-_Z17hs_kernel_wrapperPiS_jjj
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI3_0:
.long 0x42c80000 # float 100
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $16777264, %rsp # imm = 0x1000030
.cfi_def_cfa_offset 16777280
.cfi_offset %rbx, -16
movl $.Lstr, %edi
callq puts@PLT
movl $.Lstr.1, %edi
callq puts@PLT
xorl %ebx, %ebx
movl $.L.str.2, %edi
movl $2097152, %esi # imm = 0x200000
xorl %eax, %eax
callq printf
leaq 48(%rsp), %rdi
movl $8388608, %edx # imm = 0x800000
xorl %esi, %esi
callq memset@PLT
.p2align 4, 0x90
.LBB3_1: # =>This Inner Loop Header: Depth=1
movl $3, 8388656(%rsp,%rbx,4)
incq %rbx
cmpq $2097152, %rbx # imm = 0x200000
jne .LBB3_1
# %bb.2:
movl $.Lstr.2, %edi
callq puts@PLT
movl $.Lstr.3, %edi
callq puts@PLT
leaq 32(%rsp), %rdi
movl $8388608, %esi # imm = 0x800000
callq hipMalloc
leaq 24(%rsp), %rdi
movl $8388608, %esi # imm = 0x800000
callq hipMalloc
movl $.Lstr.4, %edi
callq puts@PLT
movq 32(%rsp), %rdi
leaq 8388656(%rsp), %rsi
movl $8388608, %edx # imm = 0x800000
movl $1, %ecx
callq hipMemcpy
movl $.Lstr.5, %edi
callq puts@PLT
leaq 40(%rsp), %rdi
callq hipEventCreate
leaq 16(%rsp), %rdi
callq hipEventCreate
movq 40(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movl $100, %ebx
.p2align 4, 0x90
.LBB3_3: # =>This Inner Loop Header: Depth=1
movq 24(%rsp), %rdi
movq 32(%rsp), %rsi
movl $2097152, %edx # imm = 0x200000
movl $8388608, %ecx # imm = 0x800000
movl $512, %r8d # imm = 0x200
callq _Z17hs_kernel_wrapperPiS_jjj
decl %ebx
jne .LBB3_3
# %bb.4:
movq 16(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 16(%rsp), %rdi
callq hipEventSynchronize
movq 40(%rsp), %rsi
movq 16(%rsp), %rdx
leaq 12(%rsp), %rdi
callq hipEventElapsedTime
movss 12(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
divss .LCPI3_0(%rip), %xmm0
movss %xmm0, 12(%rsp)
movq 24(%rsp), %rsi
leaq 48(%rsp), %rdi
movl $8388608, %edx # imm = 0x800000
movl $2, %ecx
callq hipMemcpy
movl $400, %ebx # imm = 0x190
.p2align 4, 0x90
.LBB3_5: # =>This Inner Loop Header: Depth=1
movl 48(%rsp,%rbx,4), %edx
movl $.L.str.7, %edi
movl %ebx, %esi
xorl %eax, %eax
callq printf
incq %rbx
cmpq $408, %rbx # imm = 0x198
jne .LBB3_5
# %bb.6:
movss 12(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.8, %edi
movb $1, %al
callq printf
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $16777264, %rsp # imm = 0x1000030
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16hs_kernel_globalPiS_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16hs_kernel_sharedPiS_ii, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z16hs_kernel_globalPiS_ii,@object # @_Z16hs_kernel_globalPiS_ii
.section .rodata,"a",@progbits
.globl _Z16hs_kernel_globalPiS_ii
.p2align 3, 0x0
_Z16hs_kernel_globalPiS_ii:
.quad _Z31__device_stub__hs_kernel_globalPiS_ii
.size _Z16hs_kernel_globalPiS_ii, 8
.type _Z16hs_kernel_sharedPiS_ii,@object # @_Z16hs_kernel_sharedPiS_ii
.globl _Z16hs_kernel_sharedPiS_ii
.p2align 3, 0x0
_Z16hs_kernel_sharedPiS_ii:
.quad _Z31__device_stub__hs_kernel_sharedPiS_ii
.size _Z16hs_kernel_sharedPiS_ii, 8
.type .L.str.2,@object # @.str.2
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.2:
.asciz "ARRAY_SIZE: %d\n"
.size .L.str.2, 16
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "index %d: count %d\n"
.size .L.str.7, 20
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "average time elapsed: %f\n"
.size .L.str.8, 26
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z16hs_kernel_globalPiS_ii"
.size .L__unnamed_1, 27
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z16hs_kernel_sharedPiS_ii"
.size .L__unnamed_2, 27
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Hillis and Steele ONLINE... "
.size .Lstr, 29
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "defined vars... "
.size .Lstr.1, 17
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "filled array... "
.size .Lstr.2, 17
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "defined device pointers... "
.size .Lstr.3, 28
.type .Lstr.4,@object # @str.4
.Lstr.4:
.asciz "malloc device pointers... "
.size .Lstr.4, 27
.type .Lstr.5,@object # @str.5
.Lstr.5:
.asciz "copy device pointers... "
.size .Lstr.5, 25
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__hs_kernel_globalPiS_ii
.addrsig_sym _Z31__device_stub__hs_kernel_sharedPiS_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z16hs_kernel_globalPiS_ii
.addrsig_sym _Z16hs_kernel_sharedPiS_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z16hs_kernel_sharedPiS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x174], PT ; /* 0x00005d0000007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ IADD3 R4, R0, -c[0x0][0x170], RZ ; /* 0x80005c0000047a10 */
/* 0x000fe20007ffe0ff */
/*0070*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */
/* 0x000fe200078e00ff */
/*0080*/ HFMA2.MMA R9, -RZ, RZ, 0, 0 ; /* 0x00000000ff097435 */
/* 0x000fe200000001ff */
/*0090*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00a0*/ ISETP.GE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fe20003f06270 */
/*00b0*/ IMAD.WIDE R2, R0, R5, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fcc00078e0205 */
/*00c0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000eac000c1e1900 */
/*00d0*/ @P0 IMAD.WIDE R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004040625 */
/* 0x000fca00078e0205 */
/*00e0*/ @P0 LDG.E R9, [R4.64] ; /* 0x0000000404090981 */
/* 0x000ea2000c1e1900 */
/*00f0*/ SHF.R.S32.HI R7, RZ, 0x1f, R0 ; /* 0x0000001fff077819 */
/* 0x000fe40000011400 */
/*0100*/ LEA R6, P0, R0, c[0x0][0x160], 0x2 ; /* 0x0000580000067a11 */
/* 0x000fc800078010ff */
/*0110*/ LEA.HI.X R7, R0, c[0x0][0x164], R7, 0x2, P0 ; /* 0x0000590000077a11 */
/* 0x000fe200000f1407 */
/*0120*/ IMAD.IADD R9, R2, 0x1, R9 ; /* 0x0000000102097824 */
/* 0x004fca00078e0209 */
/*0130*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*0140*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0150*/ BRA 0x150; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z16hs_kernel_globalPiS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x174], PT ; /* 0x00005d0000007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ IADD3 R4, R0, -c[0x0][0x170], RZ ; /* 0x80005c0000047a10 */
/* 0x000fe20007ffe0ff */
/*0070*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */
/* 0x000fe200078e00ff */
/*0080*/ HFMA2.MMA R9, -RZ, RZ, 0, 0 ; /* 0x00000000ff097435 */
/* 0x000fe200000001ff */
/*0090*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00a0*/ ISETP.GE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fe20003f06270 */
/*00b0*/ IMAD.WIDE R2, R0, R5, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fcc00078e0205 */
/*00c0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000eac000c1e1900 */
/*00d0*/ @P0 IMAD.WIDE R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004040625 */
/* 0x000fca00078e0205 */
/*00e0*/ @P0 LDG.E R9, [R4.64] ; /* 0x0000000404090981 */
/* 0x000ea2000c1e1900 */
/*00f0*/ SHF.R.S32.HI R7, RZ, 0x1f, R0 ; /* 0x0000001fff077819 */
/* 0x000fe40000011400 */
/*0100*/ LEA R6, P0, R0, c[0x0][0x160], 0x2 ; /* 0x0000580000067a11 */
/* 0x000fc800078010ff */
/*0110*/ LEA.HI.X R7, R0, c[0x0][0x164], R7, 0x2, P0 ; /* 0x0000590000077a11 */
/* 0x000fe200000f1407 */
/*0120*/ IMAD.IADD R9, R2, 0x1, R9 ; /* 0x0000000102097824 */
/* 0x004fca00078e0209 */
/*0130*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*0140*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0150*/ BRA 0x150; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z16hs_kernel_globalPiS_ii
.globl _Z16hs_kernel_globalPiS_ii
.p2align 8
.type _Z16hs_kernel_globalPiS_ii,@function
_Z16hs_kernel_globalPiS_ii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x14
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v2
s_cbranch_execz .LBB0_4
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x8
s_load_b32 s4, s[0:1], 0x10
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v1, vcc_lo
v_subrev_nc_u32_e32 v2, s4, v2
s_mov_b32 s4, exec_lo
global_load_b32 v4, v[3:4], off
v_mov_b32_e32 v3, 0
v_cmpx_lt_i32_e32 -1, v2
s_cbranch_execz .LBB0_3
v_mov_b32_e32 v3, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[2:3]
v_add_co_u32 v2, vcc_lo, s2, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
global_load_b32 v3, v[2:3], off
.LBB0_3:
s_or_b32 exec_lo, exec_lo, s4
s_load_b64 s[0:1], s[0:1], 0x0
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v4
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16hs_kernel_globalPiS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z16hs_kernel_globalPiS_ii, .Lfunc_end0-_Z16hs_kernel_globalPiS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z16hs_kernel_sharedPiS_ii
.globl _Z16hs_kernel_sharedPiS_ii
.p2align 8
.type _Z16hs_kernel_sharedPiS_ii,@function
_Z16hs_kernel_sharedPiS_ii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x14
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v2
s_cbranch_execz .LBB1_4
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x8
s_load_b32 s4, s[0:1], 0x10
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v1, vcc_lo
v_subrev_nc_u32_e32 v2, s4, v2
s_mov_b32 s4, exec_lo
global_load_b32 v4, v[3:4], off
v_mov_b32_e32 v3, 0
v_cmpx_lt_i32_e32 -1, v2
s_cbranch_execz .LBB1_3
v_mov_b32_e32 v3, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[2:3]
v_add_co_u32 v2, vcc_lo, s2, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
global_load_b32 v3, v[2:3], off
.LBB1_3:
s_or_b32 exec_lo, exec_lo, s4
s_load_b64 s[0:1], s[0:1], 0x0
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v4
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB1_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16hs_kernel_sharedPiS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z16hs_kernel_sharedPiS_ii, .Lfunc_end1-_Z16hs_kernel_sharedPiS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16hs_kernel_globalPiS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z16hs_kernel_globalPiS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16hs_kernel_sharedPiS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z16hs_kernel_sharedPiS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0005f41b_00000000-6_hillis_and_steele_scan.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z40__device_stub__Z16hs_kernel_globalPiS_iiPiS_ii
.type _Z40__device_stub__Z16hs_kernel_globalPiS_iiPiS_ii, @function
_Z40__device_stub__Z16hs_kernel_globalPiS_iiPiS_ii:
.LFB2083:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z16hs_kernel_globalPiS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z40__device_stub__Z16hs_kernel_globalPiS_iiPiS_ii, .-_Z40__device_stub__Z16hs_kernel_globalPiS_iiPiS_ii
.globl _Z16hs_kernel_globalPiS_ii
.type _Z16hs_kernel_globalPiS_ii, @function
_Z16hs_kernel_globalPiS_ii:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z40__device_stub__Z16hs_kernel_globalPiS_iiPiS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z16hs_kernel_globalPiS_ii, .-_Z16hs_kernel_globalPiS_ii
.globl _Z17hs_kernel_wrapperPiS_jjj
.type _Z17hs_kernel_wrapperPiS_jjj, @function
_Z17hs_kernel_wrapperPiS_jjj:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $56, %rsp
.cfi_def_cfa_offset 112
movq %rdi, %r12
movq %rsi, %rbx
movl %edx, %ebp
movl %r8d, %r14d
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movl %edx, %eax
movl $0, %edx
divl %r8d
leal 1(%rax), %r15d
movl %ecx, %r13d
leaq 8(%rsp), %rdi
movq %r13, %rsi
call cudaMalloc@PLT
movl $3, %ecx
movq %r13, %rdx
movq %rbx, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
cmpl $1, %ebp
jbe .L12
movl $1, %ebx
jmp .L14
.L13:
movl $3, %ecx
movq %r13, %rdx
movq %r12, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
addl %ebx, %ebx
cmpl %ebp, %ebx
jnb .L12
.L14:
movl %r14d, 28(%rsp)
movl $1, 32(%rsp)
movl %r15d, 16(%rsp)
movl $1, 20(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L13
movl %ebp, %ecx
movl %ebx, %edx
movq 8(%rsp), %rsi
movq %r12, %rdi
call _Z40__device_stub__Z16hs_kernel_globalPiS_iiPiS_ii
jmp .L13
.L12:
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L18
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L18:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z17hs_kernel_wrapperPiS_jjj, .-_Z17hs_kernel_wrapperPiS_jjj
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Hillis and Steele ONLINE... \n"
.LC1:
.string "defined vars... \n"
.LC2:
.string "ARRAY_SIZE: %d\n"
.LC3:
.string "filled array... \n"
.LC4:
.string "defined device pointers... \n"
.LC5:
.string "malloc device pointers... \n"
.LC6:
.string "copy device pointers... \n"
.LC8:
.string "index %d: count %d\n"
.LC9:
.string "average time elapsed: %f\n"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
leaq -16777216(%rsp), %r11
.cfi_def_cfa 11, 16777240
.LPSRL0:
subq $4096, %rsp
orq $0, (%rsp)
cmpq %r11, %rsp
jne .LPSRL0
.cfi_def_cfa_register 7
subq $72, %rsp
.cfi_def_cfa_offset 16777312
movq %fs:40, %rax
movq %rax, 16777272(%rsp)
xorl %eax, %eax
leaq .LC0(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $2097152, %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
.L20:
movl $3, 48(%rsp,%rax)
movl $0, 8388656(%rsp,%rax)
addq $4, %rax
cmpq $8388608, %rax
jne .L20
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 16(%rsp), %rdi
movl $8388608, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $8388608, %esi
call cudaMalloc@PLT
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 48(%rsp), %rsi
movl $1, %ecx
movl $8388608, %edx
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 32(%rsp), %rdi
call cudaEventCreate@PLT
leaq 40(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
movl $100, %ebx
.L21:
movl $512, %r8d
movl $8388608, %ecx
movl $2097152, %edx
movq 16(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z17hs_kernel_wrapperPiS_jjj
subl $1, %ebx
jne .L21
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movq 40(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 12(%rsp), %rdi
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
call cudaEventElapsedTime@PLT
movss 12(%rsp), %xmm0
divss .LC7(%rip), %xmm0
movss %xmm0, 12(%rsp)
leaq 8388656(%rsp), %rdi
movl $2, %ecx
movl $8388608, %edx
movq 24(%rsp), %rsi
call cudaMemcpy@PLT
movl $400, %ebx
leaq .LC8(%rip), %rbp
.L22:
movl 8388656(%rsp,%rbx,4), %ecx
movl %ebx, %edx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $1, %rbx
cmpq $408, %rbx
jne .L22
pxor %xmm0, %xmm0
cvtss2sd 12(%rsp), %xmm0
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 16777272(%rsp), %rax
subq %fs:40, %rax
jne .L28
movl $0, %eax
addq $16777288, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L28:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.globl _Z40__device_stub__Z16hs_kernel_sharedPiS_iiPiS_ii
.type _Z40__device_stub__Z16hs_kernel_sharedPiS_iiPiS_ii, @function
_Z40__device_stub__Z16hs_kernel_sharedPiS_iiPiS_ii:
.LFB2085:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L33
.L29:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L34
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L33:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z16hs_kernel_sharedPiS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L29
.L34:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z40__device_stub__Z16hs_kernel_sharedPiS_iiPiS_ii, .-_Z40__device_stub__Z16hs_kernel_sharedPiS_iiPiS_ii
.globl _Z16hs_kernel_sharedPiS_ii
.type _Z16hs_kernel_sharedPiS_ii, @function
_Z16hs_kernel_sharedPiS_ii:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z40__device_stub__Z16hs_kernel_sharedPiS_iiPiS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z16hs_kernel_sharedPiS_ii, .-_Z16hs_kernel_sharedPiS_ii
.section .rodata.str1.1
.LC10:
.string "_Z16hs_kernel_sharedPiS_ii"
.LC11:
.string "_Z16hs_kernel_globalPiS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2088:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _Z16hs_kernel_sharedPiS_ii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _Z16hs_kernel_globalPiS_ii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC7:
.long 1120403456
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "hillis_and_steele_scan.hip"
.globl _Z31__device_stub__hs_kernel_globalPiS_ii # -- Begin function _Z31__device_stub__hs_kernel_globalPiS_ii
.p2align 4, 0x90
.type _Z31__device_stub__hs_kernel_globalPiS_ii,@function
_Z31__device_stub__hs_kernel_globalPiS_ii: # @_Z31__device_stub__hs_kernel_globalPiS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16hs_kernel_globalPiS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z31__device_stub__hs_kernel_globalPiS_ii, .Lfunc_end0-_Z31__device_stub__hs_kernel_globalPiS_ii
.cfi_endproc
# -- End function
.globl _Z31__device_stub__hs_kernel_sharedPiS_ii # -- Begin function _Z31__device_stub__hs_kernel_sharedPiS_ii
.p2align 4, 0x90
.type _Z31__device_stub__hs_kernel_sharedPiS_ii,@function
_Z31__device_stub__hs_kernel_sharedPiS_ii: # @_Z31__device_stub__hs_kernel_sharedPiS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16hs_kernel_sharedPiS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z31__device_stub__hs_kernel_sharedPiS_ii, .Lfunc_end1-_Z31__device_stub__hs_kernel_sharedPiS_ii
.cfi_endproc
# -- End function
.globl _Z17hs_kernel_wrapperPiS_jjj # -- Begin function _Z17hs_kernel_wrapperPiS_jjj
.p2align 4, 0x90
.type _Z17hs_kernel_wrapperPiS_jjj,@function
_Z17hs_kernel_wrapperPiS_jjj: # @_Z17hs_kernel_wrapperPiS_jjj
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $120, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r8d, %ebp
movl %edx, %ebx
movq %rsi, %r12
movq %rdi, %r14
movl %ecx, %r15d
movq %rsp, %rdi
movq %r15, %rsi
callq hipMalloc
movq (%rsp), %rdi
movq %r12, %rsi
movq %r15, %rdx
movl $3, %ecx
callq hipMemcpy
cmpl $2, %ebx
jb .LBB2_5
# %bb.1: # %.lr.ph
movl %ebx, %eax
xorl %edx, %edx
divl %ebp
movl %eax, %r12d
incl %r12d
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %r12
movl %ebp, %r13d
orq %rax, %r13
movl $1, %ebp
jmp .LBB2_2
.p2align 4, 0x90
.LBB2_4: # in Loop: Header=BB2_2 Depth=1
movq (%rsp), %rdi
movq %r14, %rsi
movq %r15, %rdx
movl $3, %ecx
callq hipMemcpy
addl %ebp, %ebp
cmpl %ebx, %ebp
jae .LBB2_5
.LBB2_2: # =>This Inner Loop Header: Depth=1
movq %r12, %rdi
movl $1, %esi
movq %r13, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_4
# %bb.3: # in Loop: Header=BB2_2 Depth=1
movq (%rsp), %rax
movq %r14, 72(%rsp)
movq %rax, 64(%rsp)
movl %ebp, 12(%rsp)
movl %ebx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movl $_Z16hs_kernel_globalPiS_ii, %edi
leaq 80(%rsp), %r9
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB2_4
.LBB2_5: # %._crit_edge
movq (%rsp), %rdi
callq hipFree
addq $120, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z17hs_kernel_wrapperPiS_jjj, .Lfunc_end2-_Z17hs_kernel_wrapperPiS_jjj
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI3_0:
.long 0x42c80000 # float 100
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $16777264, %rsp # imm = 0x1000030
.cfi_def_cfa_offset 16777280
.cfi_offset %rbx, -16
movl $.Lstr, %edi
callq puts@PLT
movl $.Lstr.1, %edi
callq puts@PLT
xorl %ebx, %ebx
movl $.L.str.2, %edi
movl $2097152, %esi # imm = 0x200000
xorl %eax, %eax
callq printf
leaq 48(%rsp), %rdi
movl $8388608, %edx # imm = 0x800000
xorl %esi, %esi
callq memset@PLT
.p2align 4, 0x90
.LBB3_1: # =>This Inner Loop Header: Depth=1
movl $3, 8388656(%rsp,%rbx,4)
incq %rbx
cmpq $2097152, %rbx # imm = 0x200000
jne .LBB3_1
# %bb.2:
movl $.Lstr.2, %edi
callq puts@PLT
movl $.Lstr.3, %edi
callq puts@PLT
leaq 32(%rsp), %rdi
movl $8388608, %esi # imm = 0x800000
callq hipMalloc
leaq 24(%rsp), %rdi
movl $8388608, %esi # imm = 0x800000
callq hipMalloc
movl $.Lstr.4, %edi
callq puts@PLT
movq 32(%rsp), %rdi
leaq 8388656(%rsp), %rsi
movl $8388608, %edx # imm = 0x800000
movl $1, %ecx
callq hipMemcpy
movl $.Lstr.5, %edi
callq puts@PLT
leaq 40(%rsp), %rdi
callq hipEventCreate
leaq 16(%rsp), %rdi
callq hipEventCreate
movq 40(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movl $100, %ebx
.p2align 4, 0x90
.LBB3_3: # =>This Inner Loop Header: Depth=1
movq 24(%rsp), %rdi
movq 32(%rsp), %rsi
movl $2097152, %edx # imm = 0x200000
movl $8388608, %ecx # imm = 0x800000
movl $512, %r8d # imm = 0x200
callq _Z17hs_kernel_wrapperPiS_jjj
decl %ebx
jne .LBB3_3
# %bb.4:
movq 16(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 16(%rsp), %rdi
callq hipEventSynchronize
movq 40(%rsp), %rsi
movq 16(%rsp), %rdx
leaq 12(%rsp), %rdi
callq hipEventElapsedTime
movss 12(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
divss .LCPI3_0(%rip), %xmm0
movss %xmm0, 12(%rsp)
movq 24(%rsp), %rsi
leaq 48(%rsp), %rdi
movl $8388608, %edx # imm = 0x800000
movl $2, %ecx
callq hipMemcpy
movl $400, %ebx # imm = 0x190
.p2align 4, 0x90
.LBB3_5: # =>This Inner Loop Header: Depth=1
movl 48(%rsp,%rbx,4), %edx
movl $.L.str.7, %edi
movl %ebx, %esi
xorl %eax, %eax
callq printf
incq %rbx
cmpq $408, %rbx # imm = 0x198
jne .LBB3_5
# %bb.6:
movss 12(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.8, %edi
movb $1, %al
callq printf
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $16777264, %rsp # imm = 0x1000030
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16hs_kernel_globalPiS_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16hs_kernel_sharedPiS_ii, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z16hs_kernel_globalPiS_ii,@object # @_Z16hs_kernel_globalPiS_ii
.section .rodata,"a",@progbits
.globl _Z16hs_kernel_globalPiS_ii
.p2align 3, 0x0
_Z16hs_kernel_globalPiS_ii:
.quad _Z31__device_stub__hs_kernel_globalPiS_ii
.size _Z16hs_kernel_globalPiS_ii, 8
.type _Z16hs_kernel_sharedPiS_ii,@object # @_Z16hs_kernel_sharedPiS_ii
.globl _Z16hs_kernel_sharedPiS_ii
.p2align 3, 0x0
_Z16hs_kernel_sharedPiS_ii:
.quad _Z31__device_stub__hs_kernel_sharedPiS_ii
.size _Z16hs_kernel_sharedPiS_ii, 8
.type .L.str.2,@object # @.str.2
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.2:
.asciz "ARRAY_SIZE: %d\n"
.size .L.str.2, 16
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "index %d: count %d\n"
.size .L.str.7, 20
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "average time elapsed: %f\n"
.size .L.str.8, 26
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z16hs_kernel_globalPiS_ii"
.size .L__unnamed_1, 27
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z16hs_kernel_sharedPiS_ii"
.size .L__unnamed_2, 27
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Hillis and Steele ONLINE... "
.size .Lstr, 29
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "defined vars... "
.size .Lstr.1, 17
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "filled array... "
.size .Lstr.2, 17
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "defined device pointers... "
.size .Lstr.3, 28
.type .Lstr.4,@object # @str.4
.Lstr.4:
.asciz "malloc device pointers... "
.size .Lstr.4, 27
.type .Lstr.5,@object # @str.5
.Lstr.5:
.asciz "copy device pointers... "
.size .Lstr.5, 25
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__hs_kernel_globalPiS_ii
.addrsig_sym _Z31__device_stub__hs_kernel_sharedPiS_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z16hs_kernel_globalPiS_ii
.addrsig_sym _Z16hs_kernel_sharedPiS_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
__global__ void processKernel(int *numberArray, int N)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx<N)
numberArray[idx] = numberArray[idx] + 1;
}
extern void cuda_doStuff(int *array_in, int *array_out, int N)
{
int *numbers_d;
int numberOfBlocks = 1;
int threadsPerBlock = N;
int maxNumberOfThreads = N;
cudaMalloc((void **) &numbers_d, sizeof(int)*N);
cudaMemcpy(numbers_d, array_in, sizeof(int)*N, cudaMemcpyHostToDevice);
processKernel<<<numberOfBlocks, threadsPerBlock>>>(numbers_d, maxNumberOfThreads);
cudaDeviceSynchronize();
cudaMemcpy(array_out, numbers_d, sizeof(int)*N, cudaMemcpyDeviceToHost);
cudaFree(numbers_d);
return;
} | code for sm_80
Function : _Z13processKernelPii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x168], PT ; /* 0x00005a0002007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fca00078e0203 */
/*0090*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea4000c1e1900 */
/*00a0*/ IADD3 R5, R0, 0x1, RZ ; /* 0x0000000100057810 */
/* 0x004fca0007ffe0ff */
/*00b0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*00c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
__global__ void processKernel(int *numberArray, int N)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx<N)
numberArray[idx] = numberArray[idx] + 1;
}
extern void cuda_doStuff(int *array_in, int *array_out, int N)
{
int *numbers_d;
int numberOfBlocks = 1;
int threadsPerBlock = N;
int maxNumberOfThreads = N;
cudaMalloc((void **) &numbers_d, sizeof(int)*N);
cudaMemcpy(numbers_d, array_in, sizeof(int)*N, cudaMemcpyHostToDevice);
processKernel<<<numberOfBlocks, threadsPerBlock>>>(numbers_d, maxNumberOfThreads);
cudaDeviceSynchronize();
cudaMemcpy(array_out, numbers_d, sizeof(int)*N, cudaMemcpyDeviceToHost);
cudaFree(numbers_d);
return;
} | .file "tmpxft_0005a57a_00000000-6_cuda_wrapper.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z34__device_stub__Z13processKernelPiiPii
.type _Z34__device_stub__Z13processKernelPiiPii, @function
_Z34__device_stub__Z13processKernelPiiPii:
.LFB2082:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z13processKernelPii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z34__device_stub__Z13processKernelPiiPii, .-_Z34__device_stub__Z13processKernelPiiPii
.globl _Z13processKernelPii
.type _Z13processKernelPii, @function
_Z13processKernelPii:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z34__device_stub__Z13processKernelPiiPii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z13processKernelPii, .-_Z13processKernelPii
.globl _Z12cuda_doStuffPiS_i
.type _Z12cuda_doStuffPiS_i, @function
_Z12cuda_doStuffPiS_i:
.LFB2057:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $56, %rsp
.cfi_def_cfa_offset 96
movq %rdi, %r13
movq %rsi, %r12
movl %edx, %ebp
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movslq %edx, %rbx
salq $2, %rbx
leaq 8(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r13, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl %ebp, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
call cudaDeviceSynchronize@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 8(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
movl %ebp, %esi
movq 8(%rsp), %rdi
call _Z34__device_stub__Z13processKernelPiiPii
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z12cuda_doStuffPiS_i, .-_Z12cuda_doStuffPiS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z13processKernelPii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z13processKernelPii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
__global__ void processKernel(int *numberArray, int N)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx<N)
numberArray[idx] = numberArray[idx] + 1;
}
extern void cuda_doStuff(int *array_in, int *array_out, int N)
{
int *numbers_d;
int numberOfBlocks = 1;
int threadsPerBlock = N;
int maxNumberOfThreads = N;
cudaMalloc((void **) &numbers_d, sizeof(int)*N);
cudaMemcpy(numbers_d, array_in, sizeof(int)*N, cudaMemcpyHostToDevice);
processKernel<<<numberOfBlocks, threadsPerBlock>>>(numbers_d, maxNumberOfThreads);
cudaDeviceSynchronize();
cudaMemcpy(array_out, numbers_d, sizeof(int)*N, cudaMemcpyDeviceToHost);
cudaFree(numbers_d);
return;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
// Increment each of the first N entries of numberArray by one.
// One thread per element; launch grid may be larger than N.
__global__ void processKernel(int *numberArray, int N)
{
// Flat global thread index from block and thread coordinates.
int idx = blockIdx.x*blockDim.x + threadIdx.x;
// Bounds guard: only the first N threads touch the array.
if (idx<N)
numberArray[idx] = numberArray[idx] + 1;
}
// Copy array_in to the device, add 1 to every element via processKernel,
// and copy the result back into array_out (HIP runtime).
// Both arrays must hold at least N ints; N <= 0 is a no-op.
extern void cuda_doStuff(int *array_in, int *array_out, int N)
{
    if (N <= 0) return;                       // nothing to do; avoids a 0-thread launch

    int *numbers_d = NULL;
    // Fixed block size with a ceil-div grid. The original launched a single
    // block of N threads, which exceeds the per-block thread limit and
    // fails to launch for large N.
    int threadsPerBlock = 256;
    int numberOfBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;

    if (hipMalloc((void **) &numbers_d, sizeof(int)*N) != hipSuccess) {
        fprintf(stderr, "cuda_doStuff: hipMalloc failed\n");
        return;
    }
    hipMemcpy(numbers_d, array_in, sizeof(int)*N, hipMemcpyHostToDevice);
    processKernel<<<numberOfBlocks, threadsPerBlock>>>(numbers_d, N);
    // Surface kernel execution errors before reading results back; the
    // blocking memcpy below would also synchronize.
    hipDeviceSynchronize();
    hipMemcpy(array_out, numbers_d, sizeof(int)*N, hipMemcpyDeviceToHost);
    hipFree(numbers_d);
    return;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
// Increment each of the first N entries of numberArray by one.
// One thread per element; launch grid may be larger than N.
__global__ void processKernel(int *numberArray, int N)
{
// Flat global thread index from block and thread coordinates.
int idx = blockIdx.x*blockDim.x + threadIdx.x;
// Bounds guard: only the first N threads touch the array.
if (idx<N)
numberArray[idx] = numberArray[idx] + 1;
}
// Copy array_in to the device, add 1 to every element via processKernel,
// and copy the result back into array_out (HIP runtime).
// Both arrays must hold at least N ints; N <= 0 is a no-op.
extern void cuda_doStuff(int *array_in, int *array_out, int N)
{
    if (N <= 0) return;                       // nothing to do; avoids a 0-thread launch

    int *numbers_d = NULL;
    // Fixed block size with a ceil-div grid. The original launched a single
    // block of N threads, which exceeds the per-block thread limit and
    // fails to launch for large N.
    int threadsPerBlock = 256;
    int numberOfBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;

    if (hipMalloc((void **) &numbers_d, sizeof(int)*N) != hipSuccess) {
        fprintf(stderr, "cuda_doStuff: hipMalloc failed\n");
        return;
    }
    hipMemcpy(numbers_d, array_in, sizeof(int)*N, hipMemcpyHostToDevice);
    processKernel<<<numberOfBlocks, threadsPerBlock>>>(numbers_d, N);
    // Surface kernel execution errors before reading results back; the
    // blocking memcpy below would also synchronize.
    hipDeviceSynchronize();
    hipMemcpy(array_out, numbers_d, sizeof(int)*N, hipMemcpyDeviceToHost);
    hipFree(numbers_d);
    return;
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13processKernelPii
.globl _Z13processKernelPii
.p2align 8
.type _Z13processKernelPii,@function
; gfx1100 ISA for processKernel(int *numberArray, int N):
; numberArray[idx] += 1 for global thread index idx < N.
; Per the amdhsa metadata below: s[0:1] = kernarg segment pointer,
; s15 = workgroup id x (15 user SGPRs), v0 = workitem id x.
_Z13processKernelPii:
s_clause 0x1
; s2 <- hidden_group_size_x (kernarg +0x1c), s3 <- N (kernarg +0x8)
s_load_b32 s2, s[0:1], 0x1c
s_load_b32 s3, s[0:1], 0x8
s_waitcnt lgkmcnt(0)
; group size is a 16-bit field; mask off the upper half
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; v1 = workgroup_id * group_size + workitem_id  (global index idx)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
; narrow exec to lanes with idx < N; skip the body if none remain
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
; s[0:1] <- numberArray base pointer (kernarg +0x0)
s_load_b64 s[0:1], s[0:1], 0x0
; sign-extend idx to 64 bits, then byte offset = idx << 2 (sizeof(int))
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
; v[0:1] = base + offset (64-bit add with carry chain)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
; load, increment, store: numberArray[idx] += 1
global_load_b32 v2, v[0:1], off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, 1, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13processKernelPii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13processKernelPii, .Lfunc_end0-_Z13processKernelPii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13processKernelPii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13processKernelPii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void processKernel(int *numberArray, int N)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx<N)
numberArray[idx] = numberArray[idx] + 1;
}
extern void cuda_doStuff(int *array_in, int *array_out, int N)
{
int *numbers_d;
int numberOfBlocks = 1;
int threadsPerBlock = N;
int maxNumberOfThreads = N;
hipMalloc((void **) &numbers_d, sizeof(int)*N);
hipMemcpy(numbers_d, array_in, sizeof(int)*N, hipMemcpyHostToDevice);
processKernel<<<numberOfBlocks, threadsPerBlock>>>(numbers_d, maxNumberOfThreads);
hipDeviceSynchronize();
hipMemcpy(array_out, numbers_d, sizeof(int)*N, hipMemcpyDeviceToHost);
hipFree(numbers_d);
return;
} | .text
.file "cuda_wrapper.hip"
.globl _Z28__device_stub__processKernelPii # -- Begin function _Z28__device_stub__processKernelPii
.p2align 4, 0x90
.type _Z28__device_stub__processKernelPii,@function
_Z28__device_stub__processKernelPii: # @_Z28__device_stub__processKernelPii
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z13processKernelPii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z28__device_stub__processKernelPii, .Lfunc_end0-_Z28__device_stub__processKernelPii
.cfi_endproc
# -- End function
.globl _Z12cuda_doStuffPiS_i # -- Begin function _Z12cuda_doStuffPiS_i
.p2align 4, 0x90
.type _Z12cuda_doStuffPiS_i,@function
_Z12cuda_doStuffPiS_i: # @_Z12cuda_doStuffPiS_i
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $96, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %edx, %ebp
movq %rsi, %rbx
movq %rdi, %r15
movslq %edx, %r12
leaq (,%r12,4), %r14
leaq 8(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq 8(%rsp), %rdi
movq %r15, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movl %r12d, %edx
movabsq $4294967296, %rdi # imm = 0x100000000
orq %rdi, %rdx
orq $1, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 8(%rsp), %rax
movq %rax, 72(%rsp)
movl %ebp, 20(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 20(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z13processKernelPii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
callq hipDeviceSynchronize
movq 8(%rsp), %rsi
movq %rbx, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
callq hipFree
addq $96, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z12cuda_doStuffPiS_i, .Lfunc_end1-_Z12cuda_doStuffPiS_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13processKernelPii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z13processKernelPii,@object # @_Z13processKernelPii
.section .rodata,"a",@progbits
.globl _Z13processKernelPii
.p2align 3, 0x0
_Z13processKernelPii:
.quad _Z28__device_stub__processKernelPii
.size _Z13processKernelPii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z13processKernelPii"
.size .L__unnamed_1, 21
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__processKernelPii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13processKernelPii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z13processKernelPii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x168], PT ; /* 0x00005a0002007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fca00078e0203 */
/*0090*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea4000c1e1900 */
/*00a0*/ IADD3 R5, R0, 0x1, RZ ; /* 0x0000000100057810 */
/* 0x004fca0007ffe0ff */
/*00b0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*00c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13processKernelPii
.globl _Z13processKernelPii
.p2align 8
.type _Z13processKernelPii,@function
_Z13processKernelPii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x1c
s_load_b32 s3, s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b64 s[0:1], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_load_b32 v2, v[0:1], off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, 1, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13processKernelPii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13processKernelPii, .Lfunc_end0-_Z13processKernelPii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13processKernelPii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13processKernelPii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0005a57a_00000000-6_cuda_wrapper.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z34__device_stub__Z13processKernelPiiPii
.type _Z34__device_stub__Z13processKernelPiiPii, @function
_Z34__device_stub__Z13processKernelPiiPii:
.LFB2082:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z13processKernelPii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z34__device_stub__Z13processKernelPiiPii, .-_Z34__device_stub__Z13processKernelPiiPii
.globl _Z13processKernelPii
.type _Z13processKernelPii, @function
_Z13processKernelPii:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z34__device_stub__Z13processKernelPiiPii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z13processKernelPii, .-_Z13processKernelPii
.globl _Z12cuda_doStuffPiS_i
.type _Z12cuda_doStuffPiS_i, @function
_Z12cuda_doStuffPiS_i:
.LFB2057:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $56, %rsp
.cfi_def_cfa_offset 96
movq %rdi, %r13
movq %rsi, %r12
movl %edx, %ebp
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movslq %edx, %rbx
salq $2, %rbx
leaq 8(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r13, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl %ebp, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
call cudaDeviceSynchronize@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 8(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
movl %ebp, %esi
movq 8(%rsp), %rdi
call _Z34__device_stub__Z13processKernelPiiPii
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z12cuda_doStuffPiS_i, .-_Z12cuda_doStuffPiS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z13processKernelPii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z13processKernelPii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cuda_wrapper.hip"
.globl _Z28__device_stub__processKernelPii # -- Begin function _Z28__device_stub__processKernelPii
.p2align 4, 0x90
.type _Z28__device_stub__processKernelPii,@function
_Z28__device_stub__processKernelPii: # @_Z28__device_stub__processKernelPii
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z13processKernelPii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z28__device_stub__processKernelPii, .Lfunc_end0-_Z28__device_stub__processKernelPii
.cfi_endproc
# -- End function
.globl _Z12cuda_doStuffPiS_i # -- Begin function _Z12cuda_doStuffPiS_i
.p2align 4, 0x90
.type _Z12cuda_doStuffPiS_i,@function
_Z12cuda_doStuffPiS_i: # @_Z12cuda_doStuffPiS_i
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $96, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %edx, %ebp
movq %rsi, %rbx
movq %rdi, %r15
movslq %edx, %r12
leaq (,%r12,4), %r14
leaq 8(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq 8(%rsp), %rdi
movq %r15, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movl %r12d, %edx
movabsq $4294967296, %rdi # imm = 0x100000000
orq %rdi, %rdx
orq $1, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 8(%rsp), %rax
movq %rax, 72(%rsp)
movl %ebp, 20(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 20(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z13processKernelPii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
callq hipDeviceSynchronize
movq 8(%rsp), %rsi
movq %rbx, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
callq hipFree
addq $96, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z12cuda_doStuffPiS_i, .Lfunc_end1-_Z12cuda_doStuffPiS_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13processKernelPii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z13processKernelPii,@object # @_Z13processKernelPii
.section .rodata,"a",@progbits
.globl _Z13processKernelPii
.p2align 3, 0x0
_Z13processKernelPii:
.quad _Z28__device_stub__processKernelPii
.size _Z13processKernelPii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z13processKernelPii"
.size .L__unnamed_1, 21
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__processKernelPii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13processKernelPii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
/*
CUDA good to knows:
Basics:
Per thread:
registers (fast)
local memory (off-chip [still on the GPU though], slow)
Per block:
multiple threads
shared memory (semi-fast)
Per GPU:
Multiple kernels that each run multiple blocks
Global memory (off-chip [still on the GPU though], slow)
Threads are executed by thread processors
Threads reside in thread blocks
Thread blocks are executed by multiprocessors
Several concurrent thread blocks can reside on one multiprocessor
Limited by multiprocessor resources (shared memory and registers)
A kernel is launched as a grid of thread blocks. Kernels launched into the same stream execute one at a time; concurrent kernel execution requires separate streams (compute capability 2.0+).
Advanced:
cudaMemcpy(dst, src, size, direction)
blocks CPU thread.
Compiler tips:
nvcc <filename>.cu [-o <executable>]
Builds release mode
nvcc -g <filename>.cu
Builds debug mode
Can debug host code but not device code
nvcc -deviceemu <filename>.cu
Builds device emulation mode
All code runs on CPU, no debug symbols
nvcc -deviceemu -g <filename>.cu
Builds debug device emulation mode
All code runs on CPU, with debug symbols
Tips and tricks:
If our arrays A,B,C are shorter than 1024 elements, N < 1024, then
– one thread block is enough
– N threads in the thread block
If our arrays are longer than 1024, then
– Choose the number of threads in the thread blocks to be
integer*32
– Calculate how many thread blocks you need
– There will be some threads that should do nothing
Why multiples of 32?
– Threads are executed synchronously in bunches of 32 =
warp
– All threads must have their data ready before the warp runs
– Cache lines are 4 B x warp size = 128 B
– GPU resources can be fully utilized when these parameters
are used
# of blocks = ceil(N/threadsInBlock)
= (N+threadsInBlock-1)/threadsInBlock
Compile:
nvcc -o galaxy galaxy_program.cu -res-usage
Run:
time ./galaxy
*/
#include <stdio.h>
#include <iostream>
#include <fstream>
using namespace std;
// Declare functions and classes that are below main.
// Container for one galaxy catalogue: the galaxy count plus parallel arrays
// of angular coordinates (alphas = right ascension, deltas = declination,
// per the surrounding file). Does not own the arrays; callers manage their
// allocation and lifetime.
class GalaxyFile{
public:
    int number_of_galaxies;
    float *alphas, *deltas;

    // Default-construct an empty catalogue. The original default constructor
    // left all members uninitialized, making any accidental read undefined.
    GalaxyFile() : number_of_galaxies(0), alphas(nullptr), deltas(nullptr) {}

    // Wrap existing coordinate arrays of length `num`.
    GalaxyFile(int num, float *as, float *ds)
        : number_of_galaxies(num), alphas(as), deltas(ds) {}
};
void print_omegas(float*, int);
void write_omegas_to_file(string, float*);
void write_histogram_to_file(string, int*);
void print_histogram(string, int*, int);
GalaxyFile readFile(string);
// Define some useful macros
#define BIN_WIDTH 0.25f
#define BIN_MIN 0.0f
#define BIN_MAX 180.0f
#define NUMBER_OF_BINS (int)(BIN_MAX*(1.0f/BIN_WIDTH))
// Google is your friend.
#define ARCMINS_TO_RADIANS 0.000290888209f
#define RADIANS_TO_DEGREES 57.295779513f
// One thread per galaxy of catalogue 2: accumulates into gpu_hist the angular
// separation (degrees, in BIN_WIDTH-wide bins) between that galaxy and every
// galaxy of catalogue 1. Catalogue size is hard-coded to 100000 entries.
// gpu_hist must have NUMBER_OF_BINS slots, zero-initialized by the caller.
// Coordinates are assumed to be in radians (see ARCMINS_TO_RADIANS above) --
// TODO confirm against the (not shown) readFile conversion.
__global__
void angle_between_galaxies(float *alphas1, float *deltas1, float *alphas2, float *deltas2, int *gpu_hist){
    int idx = blockDim.x*blockIdx.x + threadIdx.x;
    if(idx < 100000){
        for(int i=0; i<100000; i++){
            float angle = 0.0f;
            // Don't do duplicates.
            // NOTE(review): with &&, a pair sharing only ONE coordinate is
            // also counted as a duplicate (angle 0). A strict same-galaxy
            // check would be ||; left unchanged pending confirmation.
            if( alphas1[i] != alphas2[idx] && deltas1[i] != deltas2[idx] ) {
                // Spherical law of cosines. Float intrinsics (sinf/cosf)
                // replace the double-precision sin/cos the original applied
                // to float inputs.
                float x = sinf(deltas1[i]) * sinf(deltas2[idx]) + cosf(deltas1[i]) * cosf(deltas2[idx]) * cosf(alphas1[i] - alphas2[idx]);
                // Clamp into [-1, 1] before acos to guard against rounding.
                angle = acosf(fmaxf(-1.0f, fminf(x, 1.0f))) * RADIANS_TO_DEGREES;
            }
            // Bin index; % wraps an exact 180.0 separation into bin 0.
            int ix = (int)(floorf(angle * (1.0f/BIN_WIDTH))) % NUMBER_OF_BINS;
            // The original called __syncthreads() here, inside a divergent
            // branch (threads with idx >= 100000 never reach it), which is
            // undefined behavior. No shared memory is involved, so the
            // barrier is removed rather than moved.
            atomicAdd(&gpu_hist[ix], 1);
        }
    }
}
int* calculate_histogram(GalaxyFile galaxies1, GalaxyFile galaxies2){
// Declare and allocate memory for histogram arrays that will be accessible on CPU
float galaxy_array_size = galaxies1.number_of_galaxies * sizeof(float);
float histogram_size = NUMBER_OF_BINS * sizeof(int);
int *histogram;
int *total_histogram;
histogram = (int *) malloc(NUMBER_OF_BINS*sizeof(int));
total_histogram = (int *) malloc(NUMBER_OF_BINS*sizeof(int));
memset(total_histogram, 0, NUMBER_OF_BINS*sizeof(int));
// Declare angle arrays that will be accessible on GPU
float *gpu_alphas1;
float *gpu_deltas1;
float *gpu_alphas2;
float *gpu_deltas2;
int *gpu_histogram;
// Allocate memory on GPU for angle arrays
cudaMalloc((void**) &gpu_alphas1, galaxy_array_size);
cudaMalloc((void**) &gpu_deltas1, galaxy_array_size);
cudaMalloc((void**) &gpu_alphas2, galaxy_array_size);
cudaMalloc((void**) &gpu_deltas2, galaxy_array_size);
cudaMalloc((void**) &gpu_histogram, NUMBER_OF_BINS*sizeof(int));
// Copy angles from CPU onto GPU
cudaMemcpy(gpu_alphas1, galaxies1.alphas, galaxy_array_size, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_deltas1, galaxies1.deltas, galaxy_array_size, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_alphas2, galaxies2.alphas, galaxy_array_size, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_deltas2, galaxies2.deltas, galaxy_array_size, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_histogram, histogram, galaxy_array_size, cudaMemcpyHostToDevice);
int warp_size = 32;
int threadsInBlock = 11 * warp_size;
int blocksInGrid = ceil((galaxies1.number_of_galaxies + threadsInBlock) / threadsInBlock);
// Define the grid size (blocks per grid)
dim3 dimGrid(blocksInGrid);
// Define block size (threads per block)
dim3 dimBlock(threadsInBlock);
// Write histogram full of zeros
cudaMemset(gpu_histogram, 0, histogram_size);
// Calculate angles between galaxies1[i] and every galaxy in galaxies2
angle_between_galaxies<<<dimGrid, dimBlock>>>(gpu_alphas1, gpu_deltas1, gpu_alphas2, gpu_deltas2, gpu_histogram);
// Copy result histogram into CPU histogram
cudaMemcpy(histogram, gpu_histogram, histogram_size, cudaMemcpyDeviceToHost);
// Free all the memory we allocated on the GPU
cudaFree( gpu_alphas1 );
cudaFree( gpu_deltas1 );
cudaFree( gpu_alphas2 );
cudaFree( gpu_deltas2 );
cudaFree( gpu_histogram );
return histogram;
}
float* calculate_omegas(int* DD, int* DR, int* RR){
float* omegas;
omegas = (float *) malloc(NUMBER_OF_BINS*sizeof(float));
for(int i=0; i<NUMBER_OF_BINS; i++){
if(RR[i] != 0.0f){
omegas[i] = (DD[i] - 2.0f*DR[i] + RR[i]) / RR[i];
}else{
omegas[i] = 0.0f;
}
}
return omegas;
}
// CUDA program that calculates distribution of galaxies
int main()
{
// Read files and store data in GalaxyFile classes.
GalaxyFile galaxies1;
GalaxyFile galaxies2;
galaxies1 = readFile("test_data/flat_100k_arcmin.txt");
galaxies2 = readFile("test_data/data_100k_arcmin.txt");
int* DD_hist = calculate_histogram(galaxies1, galaxies1);
int* DR_hist = calculate_histogram(galaxies1, galaxies2);
int* RR_hist = calculate_histogram(galaxies2, galaxies2);
print_histogram("DD", DD_hist, 20);
print_histogram("DR", DR_hist, 20);
print_histogram("RR", RR_hist, 20);
write_histogram_to_file("dd_histogram.txt", DD_hist);
write_histogram_to_file("dr_histogram.txt", DR_hist);
write_histogram_to_file("rr_histogram.txt", RR_hist);
float* omegas = calculate_omegas(DD_hist, DR_hist, RR_hist);
print_omegas(omegas, 15);
write_omegas_to_file("omegas.txt", omegas);
return EXIT_SUCCESS;
}
/* UTILITY FUNCTIONS/CLASSES BELOW */
GalaxyFile readFile(string filename)
{
ifstream infile(filename);
int number_of_galaxies;
// Read first line which is the number of galaxies that's stored in the file.
infile >> number_of_galaxies;
float galaxy_array_size = number_of_galaxies * sizeof(float);
float *alphas, *deltas;
alphas = (float*) malloc(galaxy_array_size);
deltas = (float*) malloc(galaxy_array_size);
float alpha;
float delta;
// Read arc minute angles for each galaxy
// Then convert those angles to radians and store those in alphas and deltas arrays
for(int i=0; i<number_of_galaxies; i++) {
infile >> alpha >> delta;
alphas[i] = alpha * ARCMINS_TO_RADIANS;
deltas[i] = delta * ARCMINS_TO_RADIANS;
}
infile.close();
GalaxyFile galaxyFile(number_of_galaxies, alphas, deltas);
return galaxyFile;
}
void print_omegas(float* omegas, int bins_to_print){
for (int i=0; i<NUMBER_OF_BINS; i++){
if(omegas[i] != 0.0f && i < bins_to_print){
printf("omegas[%d]: %f\n", i, omegas[i]);
}
}
}
void print_histogram(string label, int *histogram, int bins_to_print){
long long galaxies_counted = 0;
// Print each bucket bin that has 1 or more galaxy-pair-angle in it.
for (int i=0; i<NUMBER_OF_BINS; i++) {
float bucket_min = (float)i / (1.0f/BIN_WIDTH);
float bucket_max = (float)i / (1.0f/BIN_WIDTH) + BIN_WIDTH;
int bucket_value = histogram[i];
galaxies_counted += histogram[i];
if(bucket_value > 0 && i < bins_to_print){
printf("[%f, %f]: %d\n", bucket_min, bucket_max, bucket_value);
}
}
cout << "Galaxy pairs counted in " << label << ": " << galaxies_counted << endl;
}
void write_omegas_to_file(string filename, float* omegas){
ofstream file;
file.open("output/"+filename);
for (int i=0; i<NUMBER_OF_BINS; i++){
file << omegas[i];
if(i<NUMBER_OF_BINS-1) file << "\n";
}
file.close();
}
void write_histogram_to_file(string filename, int* histogram){
ofstream file;
file.open("output/"+filename);
for (int i=0; i<NUMBER_OF_BINS; i++){
file << histogram[i];
if(i<NUMBER_OF_BINS-1) file << "\n";
}
file.close();
} | /*
CUDA good to knows:
Basics:
Per thread:
registers (fast)
local memory (off-chip [still on the GPU though], slow)
Per block:
multiple threads
shared memory (semi-fast)
Per GPU:
Multiple kernels that each run multiple blocks
Global memory (off-chip [still on the GPU though], slow)
Threads are executed by thread processors
Threads reside in thread blocks
Thread blocks are executed by multiprocessors
Several concurrent thread blocks can reside on one multiprocessor
Limited by multiprocessor resources (shared memory and registers)
A kernel is launched as a grid of thread blocks. (Note: on modern GPUs several kernels may execute concurrently when launched in different streams; strictly one-kernel-at-a-time execution applies only to very old devices or to kernels in the same stream.)
Advanced:
cudaMemcpy(dst, src, size, direction)
blocks CPU thread.
Compiler tips:
nvcc <filename>.cu [-o <executable>]
Builds release mode
nvcc -g <filename>.cu
Builds debug mode
Can debug host code but not device code
nvcc -deviceemu <filename>.cu
Builds device emulation mode
All code runs on CPU, no debug symbols
nvcc -deviceemu -g <filename>.cu
Builds debug device emulation mode
All code runs on CPU, with debug symbols
Tips and tricks:
If our arrays A,B,C are shorter than 1024 elements, N < 1024, then
– one thread block is enough
– N threads in the thread block
If our arrays are longer than 1024, then
– Choose the number of threads in the thread blocks to be
integer*32
– Calculate how many thread blocks you need
– There will be some threads that should do nothing
Why multiples of 32?
– Threads are executed synchronously in bunches of 32 =
warp
– All threads must have their data ready before the warp runs
– Cache lines are 4 B x warp size = 128 B
– GPU resources can be fully utilized when these parameters
are used
# of blocks = ceil(N/threadsInBlock)
= (N+threadsInBlock-1)/threadsInBlock
Compile:
nvcc -o galaxy galaxy_program.cu -res-usage
Run:
time ./galaxy
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <iostream>
#include <fstream>
using namespace std;
// Declare functions and classes that are below main.
// Simple record for one galaxy catalogue: an entry count plus two parallel
// coordinate arrays (right-ascension / declination values, stored in radians
// by readFile). The class stores the caller-supplied pointers directly — it
// makes no copy and does not free them.
class GalaxyFile{
public:
    int number_of_galaxies;
    float *alphas, *deltas;
    // Default-constructed instances leave all members uninitialized,
    // matching the original behaviour (main() assigns over them).
    GalaxyFile(){}
    // Wrap an existing catalogue: `num` entries in arrays `as` / `ds`.
    GalaxyFile(int num, float *as, float *ds)
        : number_of_galaxies(num), alphas(as), deltas(ds) {}
};
void print_omegas(float*, int);
void write_omegas_to_file(string, float*);
void write_histogram_to_file(string, int*);
void print_histogram(string, int*, int);
GalaxyFile readFile(string);
// Define some useful macros
#define BIN_WIDTH 0.25f
#define BIN_MIN 0.0f
#define BIN_MAX 180.0f
#define NUMBER_OF_BINS (int)(BIN_MAX*(1.0f/BIN_WIDTH))
// Google is your friend.
#define ARCMINS_TO_RADIANS 0.000290888209f
#define RADIANS_TO_DEGREES 57.295779513f
// Accumulate the angular separation (degrees) of every pair drawn from two
// galaxy catalogues into a global histogram of BIN_WIDTH-degree bins.
// Launch layout: one thread per galaxy of set 2; each thread loops over all
// of set 1. NOTE: the catalogue size (100000) is hard-coded to match the
// input files read in main().
__global__
void angle_between_galaxies(float *alphas1, float *deltas1, float *alphas2, float *deltas2, int *gpu_hist){
    int idx = blockDim.x*blockIdx.x + threadIdx.x;
    if(idx < 100000){
        for(int i=0; i<100000; i++){
            float angle = 0.0f;
            // Compute the angle unless the two galaxies coincide exactly.
            // BUG FIX: the original used &&, which also skipped any pair
            // sharing just ONE coordinate and mis-binned it at 0 degrees;
            // a pair is a duplicate only when BOTH coordinates match.
            if( alphas1[i] != alphas2[idx] || deltas1[i] != deltas2[idx] ) {
                // Spherical law of cosines. FIX: use the single-precision
                // sinf/cosf instead of double-precision sin/cos on floats.
                float x = sinf(deltas1[i]) * sinf(deltas2[idx])
                        + cosf(deltas1[i]) * cosf(deltas2[idx]) * cosf(alphas1[i] - alphas2[idx]);
                // Clamp into acosf's domain to absorb rounding error.
                angle = acosf(fmaxf(-1.0f, fminf(x, 1.0f))) * RADIANS_TO_DEGREES;
            }
            int ix = (int)(floorf(angle * (1.0f/BIN_WIDTH))) % NUMBER_OF_BINS;
            // BUG FIX: the original called __syncthreads() here, inside a
            // branch that threads with idx >= 100000 never reach -- a
            // barrier in divergent control flow is undefined behaviour.
            // No shared memory is used, so the barrier served no purpose
            // and is removed.
            atomicAdd(&gpu_hist[ix], 1);
        }
    }
}
// Build the angular-separation histogram between two catalogues on the GPU.
// Returns a heap-allocated array of NUMBER_OF_BINS ints; the caller frees it.
// Assumes both catalogues hold galaxies1.number_of_galaxies entries (the
// kernel itself hard-codes 100000 -- TODO confirm they always match).
int* calculate_histogram(GalaxyFile galaxies1, GalaxyFile galaxies2){
    // Byte counts. FIX: the original stored these in `float`, which silently
    // rounds for large counts; keep them integral.
    size_t galaxy_array_size = (size_t)galaxies1.number_of_galaxies * sizeof(float);
    size_t histogram_size = (size_t)NUMBER_OF_BINS * sizeof(int);
    // Host-side result buffer. (FIX: the original also allocated an unused
    // `total_histogram` buffer and leaked it; removed.)
    int *histogram = (int *) malloc(histogram_size);
    // Device buffers for the four coordinate arrays and the histogram.
    float *gpu_alphas1;
    float *gpu_deltas1;
    float *gpu_alphas2;
    float *gpu_deltas2;
    int *gpu_histogram;
    hipMalloc((void**) &gpu_alphas1, galaxy_array_size);
    hipMalloc((void**) &gpu_deltas1, galaxy_array_size);
    hipMalloc((void**) &gpu_alphas2, galaxy_array_size);
    hipMalloc((void**) &gpu_deltas2, galaxy_array_size);
    hipMalloc((void**) &gpu_histogram, histogram_size);
    // Upload the coordinate arrays.
    hipMemcpy(gpu_alphas1, galaxies1.alphas, galaxy_array_size, hipMemcpyHostToDevice);
    hipMemcpy(gpu_deltas1, galaxies1.deltas, galaxy_array_size, hipMemcpyHostToDevice);
    hipMemcpy(gpu_alphas2, galaxies2.alphas, galaxy_array_size, hipMemcpyHostToDevice);
    hipMemcpy(gpu_deltas2, galaxies2.deltas, galaxy_array_size, hipMemcpyHostToDevice);
    // BUG FIX: the original hipMemcpy'd galaxy_array_size (~400 KB) of
    // uninitialized host memory into the ~2.8 KB gpu_histogram buffer -- a
    // device-side overflow. The hipMemset below is the correct (and
    // sufficient) initialization, so that copy is simply removed.
    // Launch configuration: a warp-multiple block size and enough blocks to
    // cover every galaxy. FIX: proper integer ceil-division; the original
    // applied ceil() to an already-truncated integer quotient, adding a
    // spurious extra block when the count divided evenly.
    int warp_size = 32;
    int threadsInBlock = 11 * warp_size;
    int blocksInGrid = (galaxies1.number_of_galaxies + threadsInBlock - 1) / threadsInBlock;
    dim3 dimGrid(blocksInGrid);
    dim3 dimBlock(threadsInBlock);
    // Zero the device histogram before accumulation.
    hipMemset(gpu_histogram, 0, histogram_size);
    // One thread per galaxy in galaxies2, each paired against all of galaxies1.
    angle_between_galaxies<<<dimGrid, dimBlock>>>(gpu_alphas1, gpu_deltas1, gpu_alphas2, gpu_deltas2, gpu_histogram);
    // Fetch the result; this blocking copy also waits for the kernel.
    hipMemcpy(histogram, gpu_histogram, histogram_size, hipMemcpyDeviceToHost);
    // Release device memory.
    hipFree( gpu_alphas1 );
    hipFree( gpu_deltas1 );
    hipFree( gpu_alphas2 );
    hipFree( gpu_deltas2 );
    hipFree( gpu_histogram );
    return histogram;
}
// Per-bin omega = (DD - 2*DR + RR) / RR, with 0 where the RR bin is empty
// (presumably the Landy-Szalay correlation estimator -- TODO confirm).
// All three inputs are NUMBER_OF_BINS-long histograms. Returns a
// heap-allocated array of NUMBER_OF_BINS floats; the caller frees it.
float* calculate_omegas(int* DD, int* DR, int* RR){
    float* omegas = (float *) malloc(NUMBER_OF_BINS*sizeof(float));
    for(int i=0; i<NUMBER_OF_BINS; i++){
        // FIX: RR[i] is an int; compare against an integer zero instead of
        // the original's float literal (same result, clearer intent).
        if(RR[i] != 0){
            // The 2.0f factor promotes the whole expression to float before
            // the division.
            omegas[i] = (DD[i] - 2.0f*DR[i] + RR[i]) / RR[i];
        }else{
            omegas[i] = 0.0f;
        }
    }
    return omegas;
}
// CUDA program that calculates distribution of galaxies
// Program entry point: read both catalogues, build the DD/DR/RR histograms
// on the GPU, print/persist them, then compute and persist the omegas.
int main()
{
    // NOTE(review): DD is built from the "flat" file and RR from the "data"
    // file -- confirm the D(data)/R(random) labels match the input files.
    GalaxyFile galaxies1;
    GalaxyFile galaxies2;
    galaxies1 = readFile("test_data/flat_100k_arcmin.txt");
    galaxies2 = readFile("test_data/data_100k_arcmin.txt");
    int* DD_hist = calculate_histogram(galaxies1, galaxies1);
    int* DR_hist = calculate_histogram(galaxies1, galaxies2);
    int* RR_hist = calculate_histogram(galaxies2, galaxies2);
    print_histogram("DD", DD_hist, 20);
    print_histogram("DR", DR_hist, 20);
    print_histogram("RR", RR_hist, 20);
    write_histogram_to_file("dd_histogram.txt", DD_hist);
    write_histogram_to_file("dr_histogram.txt", DR_hist);
    write_histogram_to_file("rr_histogram.txt", RR_hist);
    float* omegas = calculate_omegas(DD_hist, DR_hist, RR_hist);
    print_omegas(omegas, 15);
    write_omegas_to_file("omegas.txt", omegas);
    // FIX: release the heap buffers the original leaked (harmless at exit,
    // but keeps the program clean under leak checkers).
    free(omegas);
    free(DD_hist);
    free(DR_hist);
    free(RR_hist);
    free(galaxies1.alphas);
    free(galaxies1.deltas);
    free(galaxies2.alphas);
    free(galaxies2.deltas);
    return EXIT_SUCCESS;
}
/* UTILITY FUNCTIONS/CLASSES BELOW */
// Load a galaxy catalogue from `filename`.
// Format: first token is the galaxy count, then one "alpha delta" pair of
// arc-minute angles per galaxy; both are converted to radians on load.
// The coordinate arrays are heap-allocated here and handed to the returned
// GalaxyFile; main() is responsible for freeing them.
GalaxyFile readFile(string filename)
{
    ifstream infile(filename);
    // FIX: fail loudly instead of reading an unopened stream (the original
    // left number_of_galaxies indeterminate when the file was missing).
    if(!infile){
        cerr << "readFile: could not open " << filename << "\n";
        exit(EXIT_FAILURE);
    }
    int number_of_galaxies;
    infile >> number_of_galaxies;
    // FIX: byte count kept integral (the original stored it in a float).
    size_t galaxy_array_size = (size_t)number_of_galaxies * sizeof(float);
    float *alphas = (float*) malloc(galaxy_array_size);
    float *deltas = (float*) malloc(galaxy_array_size);
    float alpha;
    float delta;
    // Convert each arc-minute pair to radians as it is read.
    for(int i=0; i<number_of_galaxies; i++) {
        infile >> alpha >> delta;
        alphas[i] = alpha * ARCMINS_TO_RADIANS;
        deltas[i] = delta * ARCMINS_TO_RADIANS;
    }
    infile.close();
    GalaxyFile galaxyFile(number_of_galaxies, alphas, deltas);
    return galaxyFile;
}
// Print the non-zero omega values among the first `bins_to_print` bins,
// one "omegas[i]: value" line each.
void print_omegas(float* omegas, int bins_to_print){
    for (int bin = 0; bin < NUMBER_OF_BINS; ++bin){
        bool in_range = bin < bins_to_print;
        if (in_range && omegas[bin] != 0.0f){
            printf("omegas[%d]: %f\n", bin, omegas[bin]);
        }
    }
}
// Sum every bin of `histogram` and print the non-empty bins among the first
// `bins_to_print`, each with its [min, max] angle range in degrees, followed
// by a total-pair-count line tagged with `label`.
void print_histogram(string label, int *histogram, int bins_to_print){
    long long total_pairs = 0;
    for (int bin = 0; bin < NUMBER_OF_BINS; ++bin) {
        int count = histogram[bin];
        total_pairs += count;
        // Lower edge of this bin in degrees; upper edge is one bin width on.
        float lo = (float)bin / (1.0f/BIN_WIDTH);
        float hi = lo + BIN_WIDTH;
        if (bin < bins_to_print && count > 0){
            printf("[%f, %f]: %d\n", lo, hi, count);
        }
    }
    cout << "Galaxy pairs counted in " << label << ": " << total_pairs << endl;
}
// Write all NUMBER_OF_BINS omega values to output/<filename>, one per line,
// with no trailing newline after the last value.
void write_omegas_to_file(string filename, float* omegas){
    ofstream out("output/"+filename);
    for (int bin = 0; bin < NUMBER_OF_BINS; ++bin){
        // Newline separates values rather than terminating them, so the
        // last line carries no trailing newline (matches original output).
        if (bin > 0) out << "\n";
        out << omegas[bin];
    }
    out.close();
}
// Write all NUMBER_OF_BINS histogram counts to output/<filename>, one per
// line, with no trailing newline after the last value.
void write_histogram_to_file(string filename, int* histogram){
    ofstream out("output/"+filename);
    for (int bin = 0; bin < NUMBER_OF_BINS; ++bin){
        // Separator-style newlines: none after the final value.
        if (bin > 0) out << "\n";
        out << histogram[bin];
    }
    out.close();
}
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define N 10
#define num_threads 10000
// Demo kernel (deliberately racy): each thread does a plain, non-atomic
// read-modify-write on one of the N counters. With num_threads/N = 1000
// threads mapped to each slot, most increments are lost -- the sample
// output comments below show ~12 instead of 1000 per slot. The race is
// the point of the demo; see increment_atomic for the correct version.
__global__ void increment_naive(int *d)
{
// Flat global thread index.
int tid = threadIdx.x + blockIdx.x*blockDim.x;
// Fold every thread onto one of the N counters.
tid = tid % N;
// Racy: the load, add and store are separate, unsynchronized operations.
d[tid] += 1;
}
// Correct counterpart of increment_naive: the same mapping of threads to
// the N counters, but the increment is performed with atomicAdd so no
// updates are lost (sample output shows exactly 1000 per slot).
__global__ void increment_atomic(int *d)
{
// Flat global thread index.
int tid = threadIdx.x + blockIdx.x*blockDim.x;
// Fold every thread onto one of the N counters.
tid = tid % N;
// Atomic read-modify-write serializes concurrent updates to the same slot.
atomicAdd(&d[tid], 1);
}
// Host driver for the race demo: run the naive kernel and print the lossy
// counts, then reset the buffer, run the atomic kernel and print the exact
// counts. Expected output is recorded in the trailing comments (~12 per
// slot for the racy version, 1000 per slot for the atomic one).
// Error checking is intentionally omitted to keep the demo minimal.
int main()
{
    int h[N], *d;
    cudaMalloc(&d, sizeof(int)*N);
    cudaMemset(d, 0, sizeof(int)*N);
    // num_threads/N blocks of N threads = num_threads total threads.
    increment_naive<<<(num_threads/N), N>>>(d);
    // Blocking copy; also waits for the kernel to finish.
    cudaMemcpy(h, d, sizeof(int)*N, cudaMemcpyDeviceToHost);
    for(int i=0; i<N; i++)
        std::cout << h[i] << "\n";
    // Reset the counters and repeat with atomic increments.
    cudaMemset(d, 0, sizeof(int)*N);
    increment_atomic<<<(num_threads/N), N>>>(d);
    cudaMemcpy(h, d, sizeof(int)*N, cudaMemcpyDeviceToHost);
    for(int i=0; i<N; i++)
        std::cout << h[i] << "\n";
    // FIX: the original leaked the device buffer.
    cudaFree(d);
    return 0;
}
//12
//12
//12
//12
//12
//12
//12
//12
//12
//12
//1000
//1000
//1000
//1000
//1000
//1000
//1000
//1000
//1000
//1000 | code for sm_80
Function : _Z16increment_atomicPi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e220000002100 */
/*0020*/ MOV R5, 0x1 ; /* 0x0000000100057802 */
/* 0x000fe20000000f00 */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e240000002500 */
/*0050*/ IMAD R0, R3, c[0x0][0x0], R0 ; /* 0x0000000003007a24 */
/* 0x001fc800078e0200 */
/*0060*/ IMAD.HI R2, R0, 0x66666667, RZ ; /* 0x6666666700027827 */
/* 0x000fca00078e02ff */
/*0070*/ SHF.R.U32.HI R3, RZ, 0x1f, R2 ; /* 0x0000001fff037819 */
/* 0x000fc80000011602 */
/*0080*/ LEA.HI.SX32 R3, R2, R3, 0x1e ; /* 0x0000000302037211 */
/* 0x000fe200078ff2ff */
/*0090*/ HFMA2.MMA R2, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff027435 */
/* 0x000fc800000001ff */
/*00a0*/ IMAD R3, R3, -0xa, R0 ; /* 0xfffffff603037824 */
/* 0x000fcc00078e0200 */
/*00b0*/ IMAD.WIDE R2, R3, R2, c[0x0][0x160] ; /* 0x0000580003027625 */
/* 0x000fca00078e0202 */
/*00c0*/ RED.E.ADD.STRONG.GPU [R2.64], R5 ; /* 0x000000050200798e */
/* 0x000fe2000c10e184 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z15increment_naivePi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e220000002100 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0030*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e240000002500 */
/*0040*/ IMAD R0, R3, c[0x0][0x0], R0 ; /* 0x0000000003007a24 */
/* 0x001fc800078e0200 */
/*0050*/ IMAD.HI R2, R0, 0x66666667, RZ ; /* 0x6666666700027827 */
/* 0x000fca00078e02ff */
/*0060*/ SHF.R.U32.HI R3, RZ, 0x1f, R2 ; /* 0x0000001fff037819 */
/* 0x000fc80000011602 */
/*0070*/ LEA.HI.SX32 R3, R2, R3, 0x1e ; /* 0x0000000302037211 */
/* 0x000fe200078ff2ff */
/*0080*/ HFMA2.MMA R2, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff027435 */
/* 0x000fc800000001ff */
/*0090*/ IMAD R3, R3, -0xa, R0 ; /* 0xfffffff603037824 */
/* 0x000fcc00078e0200 */
/*00a0*/ IMAD.WIDE R2, R3, R2, c[0x0][0x160] ; /* 0x0000580003027625 */
/* 0x000fca00078e0202 */
/*00b0*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea4000c1e1900 */
/*00c0*/ IADD3 R5, R0, 0x1, RZ ; /* 0x0000000100057810 */
/* 0x004fca0007ffe0ff */
/*00d0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*00e0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00f0*/ BRA 0xf0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define N 10
#define num_threads 10000
// Demo kernel (deliberately racy): each thread does a plain, non-atomic
// read-modify-write on one of the N counters, so with 1000 threads per
// slot most increments are lost (see the ~12-per-slot sample output in
// the trailing comments). Compare with increment_atomic below.
__global__ void increment_naive(int *d)
{
// Flat global thread index.
int tid = threadIdx.x + blockIdx.x*blockDim.x;
// Fold every thread onto one of the N counters.
tid = tid % N;
// Racy: load, add and store are separate, unsynchronized operations.
d[tid] += 1;
}
// Correct counterpart of increment_naive: identical thread-to-slot mapping,
// but atomicAdd serializes concurrent updates so no increments are lost
// (exactly 1000 per slot in the sample output).
__global__ void increment_atomic(int *d)
{
// Flat global thread index.
int tid = threadIdx.x + blockIdx.x*blockDim.x;
// Fold every thread onto one of the N counters.
tid = tid % N;
// Atomic read-modify-write on the shared counter.
atomicAdd(&d[tid], 1);
}
// Host driver for the race demo: run the naive kernel and print the lossy
// counts, then reset and run the atomic kernel and print the exact counts.
// Expected output is recorded in the trailing comments.
// NOTE(review): no CUDA error checking, and `d` is never cudaFree'd --
// acceptable for a throwaway demo, not for production code.
int main()
{
int h[N], *d;
cudaMalloc(&d, sizeof(int)*N);
cudaMemset(d, 0, sizeof(int)*N);
// num_threads/N blocks of N threads = num_threads total threads.
increment_naive<<<(num_threads/N), N>>>(d);
// Blocking copy; also waits for the kernel to finish.
cudaMemcpy(h, d, sizeof(int)*N, cudaMemcpyDeviceToHost);
for(int i=0; i<N; i++)
std::cout << h[i] << "\n";
// Reset the counters and repeat with atomic increments.
cudaMemset(d, 0, sizeof(int)*N);
increment_atomic<<<(num_threads/N), N>>>(d);
cudaMemcpy(h, d, sizeof(int)*N, cudaMemcpyDeviceToHost);
for(int i=0; i<N; i++)
std::cout << h[i] << "\n";
}
//12
//12
//12
//12
//12
//12
//12
//12
//12
//12
//1000
//1000
//1000
//1000
//1000
//1000
//1000
//1000
//1000
//1000 | .file "tmpxft_000d46a5_00000000-6_9_atomic_operations.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z35__device_stub__Z15increment_naivePiPi
.type _Z35__device_stub__Z15increment_naivePiPi, @function
_Z35__device_stub__Z15increment_naivePiPi:
.LFB3694:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z15increment_naivePi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z35__device_stub__Z15increment_naivePiPi, .-_Z35__device_stub__Z15increment_naivePiPi
.globl _Z15increment_naivePi
.type _Z15increment_naivePi, @function
_Z15increment_naivePi:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z15increment_naivePiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z15increment_naivePi, .-_Z15increment_naivePi
.globl _Z36__device_stub__Z16increment_atomicPiPi
.type _Z36__device_stub__Z16increment_atomicPiPi, @function
_Z36__device_stub__Z16increment_atomicPiPi:
.LFB3696:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z16increment_atomicPi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3696:
.size _Z36__device_stub__Z16increment_atomicPiPi, .-_Z36__device_stub__Z16increment_atomicPiPi
.globl _Z16increment_atomicPi
.type _Z16increment_atomicPi, @function
_Z16increment_atomicPi:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z36__device_stub__Z16increment_atomicPiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _Z16increment_atomicPi, .-_Z16increment_atomicPi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "\n"
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $80, %rsp
.cfi_def_cfa_offset 128
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $40, %esi
call cudaMalloc@PLT
movl $40, %edx
movl $0, %esi
movq (%rsp), %rdi
call cudaMemset@PLT
movl $10, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1000, 8(%rsp)
movl $1, 12(%rsp)
movl $1, 16(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L28
.L20:
leaq 32(%rsp), %rbx
movl $2, %ecx
movl $40, %edx
movq (%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
movq %rbx, %rbp
leaq 72(%rsp), %r12
leaq _ZSt4cout(%rip), %r14
leaq .LC0(%rip), %r13
.L21:
movl (%rbx), %esi
movq %r14, %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
movl $1, %edx
movq %r13, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $4, %rbx
cmpq %rbx, %r12
jne .L21
movl $40, %edx
movl $0, %esi
movq (%rsp), %rdi
call cudaMemset@PLT
movl $10, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1000, 8(%rsp)
movl $1, 12(%rsp)
movl $1, 16(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L29
.L22:
leaq 32(%rsp), %rdi
movl $2, %ecx
movl $40, %edx
movq (%rsp), %rsi
call cudaMemcpy@PLT
leaq _ZSt4cout(%rip), %r13
leaq .LC0(%rip), %rbx
.L23:
movl 0(%rbp), %esi
movq %r13, %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
movl $1, %edx
movq %rbx, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $4, %rbp
cmpq %rbp, %r12
jne .L23
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L30
movl $0, %eax
addq $80, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L28:
.cfi_restore_state
movq (%rsp), %rdi
call _Z35__device_stub__Z15increment_naivePiPi
jmp .L20
.L29:
movq (%rsp), %rdi
call _Z36__device_stub__Z16increment_atomicPiPi
jmp .L22
.L30:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z16increment_atomicPi"
.LC2:
.string "_Z15increment_naivePi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3699:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z16increment_atomicPi(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z15increment_naivePi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3699:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.